1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* /proc/net/ support for AF_RXRPC |
3 | * |
4 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. |
5 | * Written by David Howells (dhowells@redhat.com) |
6 | */ |
7 | |
8 | #include <linux/module.h> |
9 | #include <net/sock.h> |
10 | #include <net/af_rxrpc.h> |
11 | #include "ar-internal.h" |
12 | |
/*
 * Human-readable names for the connection states, indexed by
 * enum rxrpc_conn_proto_state, printed in the State column of
 * /proc/net/rxrpc_conns.  The strings are padded for column alignment.
 */
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused " ,
	[RXRPC_CONN_CLIENT_UNSECURED]		= "ClUnsec " ,
	[RXRPC_CONN_CLIENT]			= "Client " ,
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc" ,
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec " ,
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall " ,
	[RXRPC_CONN_SERVICE]			= "SvSecure" ,
	[RXRPC_CONN_ABORTED]			= "Aborted " ,
};
23 | |
24 | /* |
25 | * generate a list of extant and dead calls in /proc/net/rxrpc_calls |
26 | */ |
27 | static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) |
28 | __acquires(rcu) |
29 | { |
30 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
31 | |
32 | rcu_read_lock(); |
33 | return seq_list_start_head_rcu(head: &rxnet->calls, pos: *_pos); |
34 | } |
35 | |
36 | static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
37 | { |
38 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
39 | |
40 | return seq_list_next_rcu(v, head: &rxnet->calls, ppos: pos); |
41 | } |
42 | |
/*
 * End iteration: drop the RCU read lock taken by ->start().
 */
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
48 | |
49 | static int rxrpc_call_seq_show(struct seq_file *seq, void *v) |
50 | { |
51 | struct rxrpc_local *local; |
52 | struct rxrpc_call *call; |
53 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
54 | enum rxrpc_call_state state; |
55 | unsigned long timeout = 0; |
56 | rxrpc_seq_t acks_hard_ack; |
57 | char lbuff[50], rbuff[50]; |
58 | |
59 | if (v == &rxnet->calls) { |
60 | seq_puts(m: seq, |
61 | s: "Proto Local " |
62 | " Remote " |
63 | " SvID ConnID CallID End Use State Abort " |
64 | " DebugId TxSeq TW RxSeq RW RxSerial CW RxTimo\n" ); |
65 | return 0; |
66 | } |
67 | |
68 | call = list_entry(v, struct rxrpc_call, link); |
69 | |
70 | local = call->local; |
71 | if (local) |
72 | sprintf(buf: lbuff, fmt: "%pISpc" , &local->srx.transport); |
73 | else |
74 | strcpy(p: lbuff, q: "no_local" ); |
75 | |
76 | sprintf(buf: rbuff, fmt: "%pISpc" , &call->dest_srx.transport); |
77 | |
78 | state = rxrpc_call_state(call); |
79 | if (state != RXRPC_CALL_SERVER_PREALLOC) { |
80 | timeout = READ_ONCE(call->expect_rx_by); |
81 | timeout -= jiffies; |
82 | } |
83 | |
84 | acks_hard_ack = READ_ONCE(call->acks_hard_ack); |
85 | seq_printf(m: seq, |
86 | fmt: "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" |
87 | " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n" , |
88 | lbuff, |
89 | rbuff, |
90 | call->dest_srx.srx_service, |
91 | call->cid, |
92 | call->call_id, |
93 | rxrpc_is_service_call(call) ? "Svc" : "Clt" , |
94 | refcount_read(r: &call->ref), |
95 | rxrpc_call_states[state], |
96 | call->abort_code, |
97 | call->debug_id, |
98 | acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack, |
99 | call->ackr_window, call->ackr_wtop - call->ackr_window, |
100 | call->rx_serial, |
101 | call->cong_cwnd, |
102 | timeout); |
103 | |
104 | return 0; |
105 | } |
106 | |
/* seq_file operations backing /proc/net/rxrpc_calls. */
const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
113 | |
114 | /* |
115 | * generate a list of extant virtual connections in /proc/net/rxrpc_conns |
116 | */ |
117 | static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) |
118 | __acquires(rxnet->conn_lock) |
119 | { |
120 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
121 | |
122 | read_lock(&rxnet->conn_lock); |
123 | return seq_list_start_head(head: &rxnet->conn_proc_list, pos: *_pos); |
124 | } |
125 | |
126 | static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, |
127 | loff_t *pos) |
128 | { |
129 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
130 | |
131 | return seq_list_next(v, head: &rxnet->conn_proc_list, ppos: pos); |
132 | } |
133 | |
134 | static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) |
135 | __releases(rxnet->conn_lock) |
136 | { |
137 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
138 | |
139 | read_unlock(&rxnet->conn_lock); |
140 | } |
141 | |
142 | static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) |
143 | { |
144 | struct rxrpc_connection *conn; |
145 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
146 | const char *state; |
147 | char lbuff[50], rbuff[50]; |
148 | |
149 | if (v == &rxnet->conn_proc_list) { |
150 | seq_puts(m: seq, |
151 | s: "Proto Local " |
152 | " Remote " |
153 | " SvID ConnID End Ref Act State Key " |
154 | " Serial ISerial CallId0 CallId1 CallId2 CallId3\n" |
155 | ); |
156 | return 0; |
157 | } |
158 | |
159 | conn = list_entry(v, struct rxrpc_connection, proc_link); |
160 | if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) { |
161 | strcpy(p: lbuff, q: "no_local" ); |
162 | strcpy(p: rbuff, q: "no_connection" ); |
163 | goto print; |
164 | } |
165 | |
166 | sprintf(buf: lbuff, fmt: "%pISpc" , &conn->local->srx.transport); |
167 | sprintf(buf: rbuff, fmt: "%pISpc" , &conn->peer->srx.transport); |
168 | print: |
169 | state = rxrpc_is_conn_aborted(conn) ? |
170 | rxrpc_call_completions[conn->completion] : |
171 | rxrpc_conn_states[conn->state]; |
172 | seq_printf(m: seq, |
173 | fmt: "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d" |
174 | " %s %08x %08x %08x %08x %08x %08x %08x\n" , |
175 | lbuff, |
176 | rbuff, |
177 | conn->service_id, |
178 | conn->proto.cid, |
179 | rxrpc_conn_is_service(conn) ? "Svc" : "Clt" , |
180 | refcount_read(r: &conn->ref), |
181 | atomic_read(v: &conn->active), |
182 | state, |
183 | key_serial(key: conn->key), |
184 | atomic_read(v: &conn->serial), |
185 | conn->hi_serial, |
186 | conn->channels[0].call_id, |
187 | conn->channels[1].call_id, |
188 | conn->channels[2].call_id, |
189 | conn->channels[3].call_id); |
190 | |
191 | return 0; |
192 | } |
193 | |
/* seq_file operations backing /proc/net/rxrpc_conns. */
const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
200 | |
201 | /* |
202 | * generate a list of extant virtual peers in /proc/net/rxrpc/peers |
203 | */ |
204 | static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) |
205 | { |
206 | struct rxrpc_peer *peer; |
207 | time64_t now; |
208 | char lbuff[50], rbuff[50]; |
209 | |
210 | if (v == SEQ_START_TOKEN) { |
211 | seq_puts(m: seq, |
212 | s: "Proto Local " |
213 | " Remote " |
214 | " Use SST MTU LastUse RTT RTO\n" |
215 | ); |
216 | return 0; |
217 | } |
218 | |
219 | peer = list_entry(v, struct rxrpc_peer, hash_link); |
220 | |
221 | sprintf(buf: lbuff, fmt: "%pISpc" , &peer->local->srx.transport); |
222 | |
223 | sprintf(buf: rbuff, fmt: "%pISpc" , &peer->srx.transport); |
224 | |
225 | now = ktime_get_seconds(); |
226 | seq_printf(m: seq, |
227 | fmt: "UDP %-47.47s %-47.47s %3u" |
228 | " %3u %5u %6llus %8u %8u\n" , |
229 | lbuff, |
230 | rbuff, |
231 | refcount_read(r: &peer->ref), |
232 | peer->cong_ssthresh, |
233 | peer->mtu, |
234 | now - peer->last_tx_at, |
235 | peer->srtt_us >> 3, |
236 | jiffies_to_usecs(j: peer->rto_j)); |
237 | |
238 | return 0; |
239 | } |
240 | |
241 | static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos) |
242 | __acquires(rcu) |
243 | { |
244 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
245 | unsigned int bucket, n; |
246 | unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); |
247 | void *p; |
248 | |
249 | rcu_read_lock(); |
250 | |
251 | if (*_pos >= UINT_MAX) |
252 | return NULL; |
253 | |
254 | n = *_pos & ((1U << shift) - 1); |
255 | bucket = *_pos >> shift; |
256 | for (;;) { |
257 | if (bucket >= HASH_SIZE(rxnet->peer_hash)) { |
258 | *_pos = UINT_MAX; |
259 | return NULL; |
260 | } |
261 | if (n == 0) { |
262 | if (bucket == 0) |
263 | return SEQ_START_TOKEN; |
264 | *_pos += 1; |
265 | n++; |
266 | } |
267 | |
268 | p = seq_hlist_start_rcu(head: &rxnet->peer_hash[bucket], pos: n - 1); |
269 | if (p) |
270 | return p; |
271 | bucket++; |
272 | n = 1; |
273 | *_pos = (bucket << shift) | n; |
274 | } |
275 | } |
276 | |
277 | static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos) |
278 | { |
279 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
280 | unsigned int bucket, n; |
281 | unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); |
282 | void *p; |
283 | |
284 | if (*_pos >= UINT_MAX) |
285 | return NULL; |
286 | |
287 | bucket = *_pos >> shift; |
288 | |
289 | p = seq_hlist_next_rcu(v, head: &rxnet->peer_hash[bucket], ppos: _pos); |
290 | if (p) |
291 | return p; |
292 | |
293 | for (;;) { |
294 | bucket++; |
295 | n = 1; |
296 | *_pos = (bucket << shift) | n; |
297 | |
298 | if (bucket >= HASH_SIZE(rxnet->peer_hash)) { |
299 | *_pos = UINT_MAX; |
300 | return NULL; |
301 | } |
302 | if (n == 0) { |
303 | *_pos += 1; |
304 | n++; |
305 | } |
306 | |
307 | p = seq_hlist_start_rcu(head: &rxnet->peer_hash[bucket], pos: n - 1); |
308 | if (p) |
309 | return p; |
310 | } |
311 | } |
312 | |
/*
 * End iteration: drop the RCU read lock taken by ->start().
 */
static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
318 | |
319 | |
/* seq_file operations backing /proc/net/rxrpc/peers. */
const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
326 | |
327 | /* |
328 | * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals |
329 | */ |
330 | static int rxrpc_local_seq_show(struct seq_file *seq, void *v) |
331 | { |
332 | struct rxrpc_local *local; |
333 | char lbuff[50]; |
334 | |
335 | if (v == SEQ_START_TOKEN) { |
336 | seq_puts(m: seq, |
337 | s: "Proto Local " |
338 | " Use Act RxQ\n" ); |
339 | return 0; |
340 | } |
341 | |
342 | local = hlist_entry(v, struct rxrpc_local, link); |
343 | |
344 | sprintf(buf: lbuff, fmt: "%pISpc" , &local->srx.transport); |
345 | |
346 | seq_printf(m: seq, |
347 | fmt: "UDP %-47.47s %3u %3u %3u\n" , |
348 | lbuff, |
349 | refcount_read(r: &local->ref), |
350 | atomic_read(v: &local->active_users), |
351 | local->rx_queue.qlen); |
352 | |
353 | return 0; |
354 | } |
355 | |
356 | static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos) |
357 | __acquires(rcu) |
358 | { |
359 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
360 | unsigned int n; |
361 | |
362 | rcu_read_lock(); |
363 | |
364 | if (*_pos >= UINT_MAX) |
365 | return NULL; |
366 | |
367 | n = *_pos; |
368 | if (n == 0) |
369 | return SEQ_START_TOKEN; |
370 | |
371 | return seq_hlist_start_rcu(head: &rxnet->local_endpoints, pos: n - 1); |
372 | } |
373 | |
374 | static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos) |
375 | { |
376 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_net(seq)); |
377 | |
378 | if (*_pos >= UINT_MAX) |
379 | return NULL; |
380 | |
381 | return seq_hlist_next_rcu(v, head: &rxnet->local_endpoints, ppos: _pos); |
382 | } |
383 | |
/*
 * End iteration: drop the RCU read lock taken by ->start().
 */
static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
389 | |
/* seq_file operations backing /proc/net/rxrpc/locals. */
const struct seq_operations rxrpc_local_seq_ops = {
	.start  = rxrpc_local_seq_start,
	.next   = rxrpc_local_seq_next,
	.stop   = rxrpc_local_seq_stop,
	.show   = rxrpc_local_seq_show,
};
396 | |
397 | /* |
398 | * Display stats in /proc/net/rxrpc/stats |
399 | */ |
400 | int rxrpc_stats_show(struct seq_file *seq, void *v) |
401 | { |
402 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_single_net(seq)); |
403 | |
404 | seq_printf(m: seq, |
405 | fmt: "Data : send=%u sendf=%u fail=%u\n" , |
406 | atomic_read(v: &rxnet->stat_tx_data_send), |
407 | atomic_read(v: &rxnet->stat_tx_data_send_frag), |
408 | atomic_read(v: &rxnet->stat_tx_data_send_fail)); |
409 | seq_printf(m: seq, |
410 | fmt: "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n" , |
411 | atomic_read(v: &rxnet->stat_tx_data), |
412 | atomic_read(v: &rxnet->stat_tx_data_retrans), |
413 | atomic_read(v: &rxnet->stat_tx_data_underflow), |
414 | atomic_read(v: &rxnet->stat_tx_data_cwnd_reset)); |
415 | seq_printf(m: seq, |
416 | fmt: "Data-Rx : nr=%u reqack=%u jumbo=%u\n" , |
417 | atomic_read(v: &rxnet->stat_rx_data), |
418 | atomic_read(v: &rxnet->stat_rx_data_reqack), |
419 | atomic_read(v: &rxnet->stat_rx_data_jumbo)); |
420 | seq_printf(m: seq, |
421 | fmt: "Ack : fill=%u send=%u skip=%u\n" , |
422 | atomic_read(v: &rxnet->stat_tx_ack_fill), |
423 | atomic_read(v: &rxnet->stat_tx_ack_send), |
424 | atomic_read(v: &rxnet->stat_tx_ack_skip)); |
425 | seq_printf(m: seq, |
426 | fmt: "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n" , |
427 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]), |
428 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]), |
429 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), |
430 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), |
431 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]), |
432 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_PING]), |
433 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]), |
434 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_DELAY]), |
435 | atomic_read(v: &rxnet->stat_tx_acks[RXRPC_ACK_IDLE])); |
436 | seq_printf(m: seq, |
437 | fmt: "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n" , |
438 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]), |
439 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]), |
440 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), |
441 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), |
442 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]), |
443 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_PING]), |
444 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]), |
445 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_DELAY]), |
446 | atomic_read(v: &rxnet->stat_rx_acks[RXRPC_ACK_IDLE])); |
447 | seq_printf(m: seq, |
448 | fmt: "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n" , |
449 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]), |
450 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_already_on]), |
451 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]), |
452 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt])); |
453 | seq_printf(m: seq, |
454 | fmt: "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n" , |
455 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]), |
456 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_retrans]), |
457 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]), |
458 | atomic_read(v: &rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin])); |
459 | seq_printf(m: seq, |
460 | fmt: "Buffers : txb=%u rxb=%u\n" , |
461 | atomic_read(v: &rxrpc_nr_txbuf), |
462 | atomic_read(v: &rxrpc_n_rx_skbs)); |
463 | seq_printf(m: seq, |
464 | fmt: "IO-thread: loops=%u\n" , |
465 | atomic_read(v: &rxnet->stat_io_loop)); |
466 | return 0; |
467 | } |
468 | |
469 | /* |
470 | * Clear stats if /proc/net/rxrpc/stats is written to. |
471 | */ |
472 | int rxrpc_stats_clear(struct file *file, char *buf, size_t size) |
473 | { |
474 | struct seq_file *m = file->private_data; |
475 | struct rxrpc_net *rxnet = rxrpc_net(net: seq_file_single_net(seq: m)); |
476 | |
477 | if (size > 1 || (size == 1 && buf[0] != '\n')) |
478 | return -EINVAL; |
479 | |
480 | atomic_set(v: &rxnet->stat_tx_data, i: 0); |
481 | atomic_set(v: &rxnet->stat_tx_data_retrans, i: 0); |
482 | atomic_set(v: &rxnet->stat_tx_data_underflow, i: 0); |
483 | atomic_set(v: &rxnet->stat_tx_data_cwnd_reset, i: 0); |
484 | atomic_set(v: &rxnet->stat_tx_data_send, i: 0); |
485 | atomic_set(v: &rxnet->stat_tx_data_send_frag, i: 0); |
486 | atomic_set(v: &rxnet->stat_tx_data_send_fail, i: 0); |
487 | atomic_set(v: &rxnet->stat_rx_data, i: 0); |
488 | atomic_set(v: &rxnet->stat_rx_data_reqack, i: 0); |
489 | atomic_set(v: &rxnet->stat_rx_data_jumbo, i: 0); |
490 | |
491 | atomic_set(v: &rxnet->stat_tx_ack_fill, i: 0); |
492 | atomic_set(v: &rxnet->stat_tx_ack_send, i: 0); |
493 | atomic_set(v: &rxnet->stat_tx_ack_skip, i: 0); |
494 | memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks)); |
495 | memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks)); |
496 | |
497 | memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack)); |
498 | |
499 | atomic_set(v: &rxnet->stat_io_loop, i: 0); |
500 | return size; |
501 | } |
502 | |