@@ -5013,6 +5013,13 @@ static int connection_paths_to_skb(struct sk_buff *skb, struct drbd_connection *
 	return -EMSGSIZE;
 }
 
+static void connection_to_statistics(struct connection_statistics *s, struct drbd_connection *connection)
+{
+	s->conn_congested = test_bit(NET_CONGESTED, &connection->transport.flags);
+	s->ap_in_flight = atomic_read(&connection->ap_in_flight);
+	s->rs_in_flight = atomic_read(&connection->rs_in_flight);
+}
+
 enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
 
 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
@@ -5118,7 +5125,7 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
 		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
 		if (err)
 			goto out;
-		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->transport.flags);
+		connection_to_statistics(&connection_statistics, connection);
 		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
 		if (err)
 			goto out;
@@ -5137,21 +5144,65 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
 }
 
 static void peer_device_to_statistics(struct peer_device_statistics *s,
-				      struct drbd_peer_device *peer_device)
+				      struct drbd_peer_device *pd)
 {
-	struct drbd_device *device = peer_device->device;
+	struct drbd_device *device = pd->device;
+	unsigned long now = jiffies;
+	unsigned long rs_left = 0;
+	int i;
+
+	/* userspace should get "future proof" units,
+	 * convert to sectors or milli seconds as appropriate */
 
 	memset(s, 0, sizeof(*s));
-	s->peer_dev_received = peer_device->recv_cnt;
-	s->peer_dev_sent = peer_device->send_cnt;
-	s->peer_dev_pending = atomic_read(&peer_device->ap_pending_cnt) +
-			      atomic_read(&peer_device->rs_pending_cnt);
-	s->peer_dev_unacked = atomic_read(&peer_device->unacked_cnt);
-	s->peer_dev_out_of_sync = drbd_bm_total_weight(peer_device) << (BM_BLOCK_SHIFT - 9);
-	s->peer_dev_resync_failed = peer_device->rs_failed << (BM_BLOCK_SHIFT - 9);
+	s->peer_dev_received = pd->recv_cnt;
+	s->peer_dev_sent = pd->send_cnt;
+	s->peer_dev_pending = atomic_read(&pd->ap_pending_cnt) +
+			      atomic_read(&pd->rs_pending_cnt);
+	s->peer_dev_unacked = atomic_read(&pd->unacked_cnt);
+	s->peer_dev_out_of_sync = BM_BIT_TO_SECT(drbd_bm_total_weight(pd));
+
+	if (is_verify_state(pd, NOW)) {
+		rs_left = BM_BIT_TO_SECT(pd->ov_left);
+		s->peer_dev_ov_start_sector = pd->ov_start_sector;
+		s->peer_dev_ov_stop_sector = pd->ov_stop_sector;
+		s->peer_dev_ov_position = pd->ov_position;
+		s->peer_dev_ov_left = BM_BIT_TO_SECT(pd->ov_left);
+		s->peer_dev_ov_skipped = BM_BIT_TO_SECT(pd->ov_skipped);
+	} else if (is_sync_state(pd, NOW)) {
+		rs_left = s->peer_dev_out_of_sync - BM_BIT_TO_SECT(pd->rs_failed);
+		s->peer_dev_resync_failed = BM_BIT_TO_SECT(pd->rs_failed);
+		s->peer_dev_rs_same_csum = BM_BIT_TO_SECT(pd->rs_same_csum);
+	}
+
+	if (rs_left) {
+		enum drbd_repl_state repl_state = pd->repl_state[NOW];
+		if (repl_state == L_SYNC_TARGET || repl_state == L_VERIFY_S)
+			s->peer_dev_rs_c_sync_rate = pd->c_sync_rate;
+
+		s->peer_dev_rs_total = BM_BIT_TO_SECT(pd->rs_total);
+
+		s->peer_dev_rs_dt_start_ms = jiffies_to_msecs(now - pd->rs_start);
+		s->peer_dev_rs_paused_ms = jiffies_to_msecs(pd->rs_paused);
+
+		i = (pd->rs_last_mark + 2) % DRBD_SYNC_MARKS;
+		s->peer_dev_rs_dt0_ms = jiffies_to_msecs(now - pd->rs_mark_time[i]);
+		s->peer_dev_rs_db0_sectors = BM_BIT_TO_SECT(pd->rs_mark_left[i]) - rs_left;
+
+		i = (pd->rs_last_mark + DRBD_SYNC_MARKS - 1) % DRBD_SYNC_MARKS;
+		s->peer_dev_rs_dt1_ms = jiffies_to_msecs(now - pd->rs_mark_time[i]);
+		s->peer_dev_rs_db1_sectors = BM_BIT_TO_SECT(pd->rs_mark_left[i]) - rs_left;
+
+		/* long term average:
+		 * dt = rs_dt_start_ms - rs_paused_ms;
+		 * db = rs_total - rs_left, which is
+		 * rs_total - (ov_left ? ov_left : out_of_sync - rs_failed)
+		 */
+	}
+
 	if (get_ldev(device)) {
 		struct drbd_md *md = &device->ldev->md;
-		struct drbd_peer_md *peer_md = &md->peers[peer_device->node_id];
+		struct drbd_peer_md *peer_md = &md->peers[pd->node_id];
 
 		spin_lock_irq(&md->uuid_lock);
 		s->peer_dev_bitmap_uuid = peer_md->bitmap_uuid;
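Side note on the unit change in this hunk: the removed lines shifted bitmap weights inline with `<< (BM_BLOCK_SHIFT - 9)`, while the new code routes every bitmap-granularity count through `BM_BIT_TO_SECT()`, in line with the "future proof" units comment (everything exported in 512-byte sectors). A minimal standalone sketch of that equivalence, assuming DRBD's usual 4 KiB bitmap granularity (`BM_BLOCK_SHIFT == 12`); the definitions below are illustrative stand-ins, not copied from the tree:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the DRBD definitions (assumed, not verbatim):
 * one bitmap bit covers a 4 KiB block, one sector is 512 bytes,
 * so each bit corresponds to 2^(12-9) = 8 sectors. */
#define BM_BLOCK_SHIFT	12
#define BM_BIT_TO_SECT(x)	((uint64_t)(x) << (BM_BLOCK_SHIFT - 9))

int main(void)
{
	uint64_t out_of_sync_bits = 1024;	/* e.g. a drbd_bm_total_weight() result */

	/* old open-coded shift vs. the macro: both yield 8192 sectors */
	uint64_t old_way = out_of_sync_bits << (BM_BLOCK_SHIFT - 9);
	uint64_t new_way = BM_BIT_TO_SECT(out_of_sync_bits);

	printf("%llu == %llu\n",
	       (unsigned long long)old_way, (unsigned long long)new_way);
	return 0;
}
```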
@@ -5954,7 +6005,7 @@ void notify_connection_state(struct sk_buff *skb,
 		     connection_info_to_skb(skb, connection_info, true)))
 		goto nla_put_failure;
 	connection_paths_to_skb(skb, connection);
-	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->transport.flags);
+	connection_to_statistics(&connection_statistics, connection);
 	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
 	genlmsg_end(skb, dh);
 	if (multicast) {
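For context on the mark bookkeeping in `peer_device_to_statistics()`: `rs_mark_time[]`/`rs_mark_left[]` provide two (dt, db) samples, and the in-code comment spells out the long-term average as `dt = rs_dt_start_ms - rs_paused_ms`, `db = rs_total - rs_left`. A hedged sketch of how a netlink consumer might turn these exported fields into a throughput number; the field names come from this patch, while the struct wrapper, helper, and sample values are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical userspace mirror of a subset of the exported fields;
 * sector counts and millisecond durations as the patch defines them. */
struct peer_device_statistics_sample {
	uint64_t peer_dev_rs_total;		/* total sectors to resync */
	uint64_t peer_dev_rs_dt_start_ms;	/* wall time since resync start */
	uint64_t peer_dev_rs_paused_ms;		/* time spent paused */
	uint64_t peer_dev_rs_db1_sectors;	/* progress since the recent mark */
	uint64_t peer_dev_rs_dt1_ms;		/* age of the recent mark */
};

/* Hypothetical helper: sectors per millisecond -> KiB per second.
 * db * 512 bytes * 1000 ms/s / 1024 bytes/KiB / dt == db * 500 / dt. */
static uint64_t rate_kib_per_s(uint64_t db_sectors, uint64_t dt_ms)
{
	if (dt_ms == 0)
		return 0;
	return db_sectors * 500 / dt_ms;
}

int main(void)
{
	struct peer_device_statistics_sample s = {
		.peer_dev_rs_total = 2097152,		/* 1 GiB in sectors */
		.peer_dev_rs_dt_start_ms = 20000,
		.peer_dev_rs_paused_ms = 2000,
		.peer_dev_rs_db1_sectors = 102400,	/* 50 MiB since last mark */
		.peer_dev_rs_dt1_ms = 1000,
	};

	/* Short-term rate from the most recent sync mark (db1/dt1):
	 * 102400 * 500 / 1000 = 51200 KiB/s, i.e. 50 MiB/s. */
	printf("recent: %llu KiB/s\n",
	       (unsigned long long)rate_kib_per_s(s.peer_dev_rs_db1_sectors,
						   s.peer_dev_rs_dt1_ms));
	return 0;
}
```

The same helper applied to the dt0/db0 pair gives the smoother estimate over the older mark, and `rs_dt_start_ms - rs_paused_ms` against `rs_total - rs_left` gives the long-term average the comment describes.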