// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/stats.c
 *
 * procfs-based user access to generic RPC statistics. The stats files
 * reside in /proc/net/rpc.
 *
 * The read routines assume that the buffer passed in is just big enough.
 * If you implement an RPC service that has its own stats routine which
 * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
 * limit.
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY	RPCDBG_MISC

/*
 * Get RPC client stats
 */
static int rpc_proc_show(struct seq_file *seq, void *v)
{
	const struct rpc_stat *statp = seq->private;
	const struct rpc_program *prog = statp->program;
	unsigned int i, j;

	seq_printf(seq,
		   "net %u %u %u %u\n",
		   statp->netcnt,
		   statp->netudpcnt,
		   statp->nettcpcnt,
		   statp->nettcpconn);
	seq_printf(seq,
		   "rpc %u %u %u\n",
		   statp->rpccnt,
		   statp->rpcretrans,
		   statp->rpcauthrefresh);

	for (i = 0; i < prog->nrvers; i++) {
		const struct rpc_version *vers = prog->version[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%u %u",
			   vers->number, vers->nrprocs);
		for (j = 0; j < vers->nrprocs; j++)
			seq_printf(seq, " %u", vers->counts[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}
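
/*
 * For illustration only (the counter values below are invented), the client
 * stats file emitted by rpc_proc_show() looks roughly like:
 *
 *	net 0 0 0 0
 *	rpc 48231 5 17
 *	proc3 23 0 3120 0 ...
 *
 * with one "proc<version>" line per registered RPC version, followed by one
 * call counter per procedure of that version.
 */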

static int rpc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rpc_proc_show, pde_data(inode));
}

static const struct proc_ops rpc_proc_ops = {
	.proc_open	= rpc_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/*
 * Get RPC server stats
 */
void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
	const struct svc_program *prog = statp->program;
	const struct svc_version *vers;
	unsigned int i, j, k;
	unsigned long count;

	seq_printf(seq,
		   "net %u %u %u %u\n",
		   statp->netcnt,
		   statp->netudpcnt,
		   statp->nettcpcnt,
		   statp->nettcpconn);
	seq_printf(seq,
		   "rpc %u %u %u %u %u\n",
		   statp->rpccnt,
		   statp->rpcbadfmt + statp->rpcbadauth + statp->rpcbadclnt,
		   statp->rpcbadfmt,
		   statp->rpcbadauth,
		   statp->rpcbadclnt);

	for (i = 0; i < prog->pg_nvers; i++) {
		vers = prog->pg_vers[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
		for (j = 0; j < vers->vs_nproc; j++) {
			count = 0;
			for_each_possible_cpu(k)
				count += per_cpu(vers->vs_count[j], k);
			seq_printf(seq, " %lu", count);
		}
		seq_putc(seq, '\n');
	}
}
EXPORT_SYMBOL_GPL(svc_seq_show);
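
/*
 * Again for illustration only (values invented), the corresponding server
 * stats produced by svc_seq_show() look like:
 *
 *	net 0 0 0 0
 *	rpc 91042 3 1 2 0
 *	proc4 2 0 91042
 *
 * The second "rpc" field is the sum of the badfmt, badauth and badclnt
 * counters that follow it, and each "proc<n>" line carries the per-CPU
 * summed call count for every procedure of server-side version <n>.
 */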

/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 *
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats;
	int i;

	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
	if (stats) {
		for (i = 0; i < clnt->cl_maxproc; i++)
			spin_lock_init(&stats[i].om_lock);
	}
	return stats;
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
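
/*
 * Note: the returned array has one rpc_iostats entry per procedure of the
 * client's program; rpc_count_iostats() below indexes it by the procedure's
 * p_statidx value.
 */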

/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 *
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);

/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

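	/*
	 * Backlog is the queueing delay between task creation (tk_start)
	 * and transmission (rq_xtime); rq_xtime stays zero if the request
	 * was never put on the wire, in which case no queue time is
	 * accumulated.
	 */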
	backlog = 0;
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
	if (task->tk_status < 0)
		op_metrics->om_error_status++;

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);

/**
 * rpc_count_iostats - tally up per-task stats
 * @task: completed rpc_task
 * @stats: array of stat structures
 *
 * Uses the statidx from @task
 */
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
{
	rpc_count_iostats_metrics(task,
				  &stats[task->tk_msg.rpc_proc->p_statidx]);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats);

static void _print_name(struct seq_file *seq, unsigned int op,
			const struct rpc_procinfo *procs)
{
	if (procs[op].p_name)
		seq_printf(seq, "\t%12s: ", procs[op].p_name);
	else if (op == 0)
		seq_printf(seq, "\t        NULL: ");
	else
		seq_printf(seq, "\t%12u: ", op);
}

static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
{
	a->om_ops += b->om_ops;
	a->om_ntrans += b->om_ntrans;
	a->om_timeouts += b->om_timeouts;
	a->om_bytes_sent += b->om_bytes_sent;
	a->om_bytes_recv += b->om_bytes_recv;
	a->om_queue = ktime_add(a->om_queue, b->om_queue);
	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
	a->om_execute = ktime_add(a->om_execute, b->om_execute);
	a->om_error_status += b->om_error_status;
}

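/*
 * One line per operation: ops, transmissions, timeouts, bytes sent, bytes
 * received, then cumulative queue, RTT and execute times converted to
 * milliseconds, and finally the number of ops that completed with a
 * negative status.
 */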
static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
			       int op, const struct rpc_procinfo *procs)
{
	_print_name(seq, op, procs);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %llu %lu\n",
		   stats->om_ops,
		   stats->om_ntrans,
		   stats->om_timeouts,
		   stats->om_bytes_sent,
		   stats->om_bytes_recv,
		   ktime_to_ms(stats->om_queue),
		   ktime_to_ms(stats->om_rtt),
		   ktime_to_ms(stats->om_execute),
		   stats->om_error_status);
}

static int do_print_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *seqv)
{
	struct seq_file *seq = seqv;

	xprt->ops->print_stats(xprt, seq);
	return 0;
}

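/**
 * rpc_clnt_show_stats - display iostats for an RPC client
 * @seq: destination seq_file
 * @clnt: RPC client with attached metrics
 *
 * Prints a header, the stats of every transport the client uses, and a
 * per-operation summary in which each entry is accumulated over @clnt and
 * all of its parent clients.
 */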
void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	unsigned int op, maxproc = clnt->cl_maxproc;

	if (!clnt->cl_metrics)
		return;

	seq_printf(seq, "\tRPC iostats version: %s ", RPC_IOSTATS_VERS);
	seq_printf(seq, "p/v: %u/%u (%s)\n",
		   clnt->cl_prog, clnt->cl_vers, clnt->cl_program->name);

	rpc_clnt_iterate_for_each_xprt(clnt, do_print_stats, seq);

	seq_printf(seq, "\tper-op statistics\n");
	for (op = 0; op < maxproc; op++) {
		struct rpc_iostats stats = {};
		struct rpc_clnt *next = clnt;
		do {
			_add_rpc_iostats(&stats, &next->cl_metrics[op]);
			if (next == next->cl_parent)
				break;
			next = next->cl_parent;
		} while (next);
		_print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo);
	}
}
EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);

/*
 * Register/unregister RPC proc files
 */
static inline struct proc_dir_entry *
do_register(struct net *net, const char *name, void *data,
	    const struct proc_ops *proc_ops)
{
	struct sunrpc_net *sn;

	dprintk("RPC: registering /proc/net/rpc/%s\n", name);
	sn = net_generic(net, sunrpc_net_id);
	return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
}

struct proc_dir_entry *
rpc_proc_register(struct net *net, struct rpc_stat *statp)
{
	return do_register(net, statp->program->name, statp, &rpc_proc_ops);
}
EXPORT_SYMBOL_GPL(rpc_proc_register);

void
rpc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);

struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
{
	return do_register(net, statp->program->pg_name, statp, proc_ops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);

void
svc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);

int rpc_proc_init(struct net *net)
{
	struct sunrpc_net *sn;

	dprintk("RPC: registering /proc/net/rpc\n");
	sn = net_generic(net, sunrpc_net_id);
	sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
	if (sn->proc_net_rpc == NULL)
		return -ENOMEM;

	return 0;
}

void rpc_proc_exit(struct net *net)
{
	dprintk("RPC: unregistering /proc/net/rpc\n");
	remove_proc_entry("rpc", net->proc_net);
}
