1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/ceph/ceph_debug.h> |
3 | |
4 | #include <linux/device.h> |
5 | #include <linux/slab.h> |
6 | #include <linux/module.h> |
7 | #include <linux/ctype.h> |
8 | #include <linux/debugfs.h> |
9 | #include <linux/seq_file.h> |
10 | #include <linux/math64.h> |
11 | #include <linux/ktime.h> |
12 | |
13 | #include <linux/ceph/libceph.h> |
14 | #include <linux/ceph/mon_client.h> |
15 | #include <linux/ceph/auth.h> |
16 | #include <linux/ceph/debugfs.h> |
17 | |
18 | #include "super.h" |
19 | |
20 | #ifdef CONFIG_DEBUG_FS |
21 | |
22 | #include "mds_client.h" |
23 | #include "metric.h" |
24 | |
25 | static int mdsmap_show(struct seq_file *s, void *p) |
26 | { |
27 | int i; |
28 | struct ceph_fs_client *fsc = s->private; |
29 | struct ceph_mdsmap *mdsmap; |
30 | |
31 | if (!fsc->mdsc || !fsc->mdsc->mdsmap) |
32 | return 0; |
33 | mdsmap = fsc->mdsc->mdsmap; |
34 | seq_printf(m: s, fmt: "epoch %d\n" , mdsmap->m_epoch); |
35 | seq_printf(m: s, fmt: "root %d\n" , mdsmap->m_root); |
36 | seq_printf(m: s, fmt: "max_mds %d\n" , mdsmap->m_max_mds); |
37 | seq_printf(m: s, fmt: "session_timeout %d\n" , mdsmap->m_session_timeout); |
38 | seq_printf(m: s, fmt: "session_autoclose %d\n" , mdsmap->m_session_autoclose); |
39 | for (i = 0; i < mdsmap->possible_max_rank; i++) { |
40 | struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr; |
41 | int state = mdsmap->m_info[i].state; |
42 | seq_printf(m: s, fmt: "\tmds%d\t%s\t(%s)\n" , i, |
43 | ceph_pr_addr(addr), |
44 | ceph_mds_state_name(s: state)); |
45 | } |
46 | return 0; |
47 | } |
48 | |
49 | /* |
50 | * mdsc debugfs |
51 | */ |
52 | static int mdsc_show(struct seq_file *s, void *p) |
53 | { |
54 | struct ceph_fs_client *fsc = s->private; |
55 | struct ceph_mds_client *mdsc = fsc->mdsc; |
56 | struct ceph_mds_request *req; |
57 | struct rb_node *rp; |
58 | int pathlen = 0; |
59 | u64 pathbase; |
60 | char *path; |
61 | |
62 | mutex_lock(&mdsc->mutex); |
63 | for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) { |
64 | req = rb_entry(rp, struct ceph_mds_request, r_node); |
65 | |
66 | if (req->r_request && req->r_session) |
67 | seq_printf(m: s, fmt: "%lld\tmds%d\t" , req->r_tid, |
68 | req->r_session->s_mds); |
69 | else if (!req->r_request) |
70 | seq_printf(m: s, fmt: "%lld\t(no request)\t" , req->r_tid); |
71 | else |
72 | seq_printf(m: s, fmt: "%lld\t(no session)\t" , req->r_tid); |
73 | |
74 | seq_printf(m: s, fmt: "%s" , ceph_mds_op_name(op: req->r_op)); |
75 | |
76 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
77 | seq_puts(m: s, s: "\t(unsafe)" ); |
78 | else |
79 | seq_puts(m: s, s: "\t" ); |
80 | |
81 | if (req->r_inode) { |
82 | seq_printf(m: s, fmt: " #%llx" , ceph_ino(inode: req->r_inode)); |
83 | } else if (req->r_dentry) { |
84 | path = ceph_mdsc_build_path(dentry: req->r_dentry, plen: &pathlen, |
85 | base: &pathbase, for_wire: 0); |
86 | if (IS_ERR(ptr: path)) |
87 | path = NULL; |
88 | spin_lock(lock: &req->r_dentry->d_lock); |
89 | seq_printf(m: s, fmt: " #%llx/%pd (%s)" , |
90 | ceph_ino(inode: d_inode(dentry: req->r_dentry->d_parent)), |
91 | req->r_dentry, |
92 | path ? path : "" ); |
93 | spin_unlock(lock: &req->r_dentry->d_lock); |
94 | ceph_mdsc_free_path(path, len: pathlen); |
95 | } else if (req->r_path1) { |
96 | seq_printf(m: s, fmt: " #%llx/%s" , req->r_ino1.ino, |
97 | req->r_path1); |
98 | } else { |
99 | seq_printf(m: s, fmt: " #%llx" , req->r_ino1.ino); |
100 | } |
101 | |
102 | if (req->r_old_dentry) { |
103 | path = ceph_mdsc_build_path(dentry: req->r_old_dentry, plen: &pathlen, |
104 | base: &pathbase, for_wire: 0); |
105 | if (IS_ERR(ptr: path)) |
106 | path = NULL; |
107 | spin_lock(lock: &req->r_old_dentry->d_lock); |
108 | seq_printf(m: s, fmt: " #%llx/%pd (%s)" , |
109 | req->r_old_dentry_dir ? |
110 | ceph_ino(inode: req->r_old_dentry_dir) : 0, |
111 | req->r_old_dentry, |
112 | path ? path : "" ); |
113 | spin_unlock(lock: &req->r_old_dentry->d_lock); |
114 | ceph_mdsc_free_path(path, len: pathlen); |
115 | } else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) { |
116 | if (req->r_ino2.ino) |
117 | seq_printf(m: s, fmt: " #%llx/%s" , req->r_ino2.ino, |
118 | req->r_path2); |
119 | else |
120 | seq_printf(m: s, fmt: " %s" , req->r_path2); |
121 | } |
122 | |
123 | seq_puts(m: s, s: "\n" ); |
124 | } |
125 | mutex_unlock(lock: &mdsc->mutex); |
126 | |
127 | return 0; |
128 | } |
129 | |
/*
 * Print one latency metric row into seq_file 's' (which must be in scope
 * at the call site): count, avg/min/max latency and standard deviation,
 * all converted to microseconds.  A sentinel min of KTIME_MAX (no samples
 * yet) is shown as 0.  Wrapped in do { } while (0) so the macro expands
 * safely as a single statement (e.g. inside an unbraced if/else).
 */
#define CEPH_LAT_METRIC_SHOW(name, total, avg, min, max, sq)		\
do {									\
	s64 _total, _avg, _min, _max, _sq, _st;				\
	_avg = ktime_to_us(avg);					\
	_min = ktime_to_us(min == KTIME_MAX ? 0 : min);			\
	_max = ktime_to_us(max);					\
	_total = total - 1;						\
	_sq = _total > 0 ? DIV64_U64_ROUND_CLOSEST(sq, _total) : 0;	\
	_st = int_sqrt64(_sq);						\
	_st = ktime_to_us(_st);						\
	seq_printf(s, "%-14s%-12lld%-16lld%-16lld%-16lld%lld\n",	\
		   name, total, _avg, _min, _max, _st);			\
} while (0)
142 | |
/*
 * Print one size metric row into seq_file 's' (in scope at the call site):
 * count, avg/min/max and total bytes.  A sentinel min of U64_MAX (no
 * samples yet) is shown as 0.  do { } while (0) keeps the expansion a
 * single statement.
 */
#define CEPH_SZ_METRIC_SHOW(name, total, avg, min, max, sum)		\
do {									\
	u64 _min = min == U64_MAX ? 0 : min;				\
	seq_printf(s, "%-14s%-12lld%-16llu%-16llu%-16llu%llu\n",	\
		   name, total, avg, _min, max, sum);			\
} while (0)
148 | |
149 | static int metrics_file_show(struct seq_file *s, void *p) |
150 | { |
151 | struct ceph_fs_client *fsc = s->private; |
152 | struct ceph_client_metric *m = &fsc->mdsc->metric; |
153 | |
154 | seq_printf(m: s, fmt: "item total\n" ); |
155 | seq_printf(m: s, fmt: "------------------------------------------\n" ); |
156 | seq_printf(m: s, fmt: "%-35s%lld\n" , "total inodes" , |
157 | percpu_counter_sum(fbc: &m->total_inodes)); |
158 | seq_printf(m: s, fmt: "%-35s%lld\n" , "opened files" , |
159 | atomic64_read(v: &m->opened_files)); |
160 | seq_printf(m: s, fmt: "%-35s%lld\n" , "pinned i_caps" , |
161 | atomic64_read(v: &m->total_caps)); |
162 | seq_printf(m: s, fmt: "%-35s%lld\n" , "opened inodes" , |
163 | percpu_counter_sum(fbc: &m->opened_inodes)); |
164 | return 0; |
165 | } |
166 | |
/*
 * Row labels for the latency/size metric tables, indexed by the metric
 * enum (used as metric_str[i] for i in [0, METRIC_MAX) below) — order
 * must match that enum.
 */
static const char * const metric_str[] = {
	"read" ,
	"write" ,
	"metadata" ,
	"copyfrom"
};
173 | static int metrics_latency_show(struct seq_file *s, void *p) |
174 | { |
175 | struct ceph_fs_client *fsc = s->private; |
176 | struct ceph_client_metric *cm = &fsc->mdsc->metric; |
177 | struct ceph_metric *m; |
178 | s64 total, avg, min, max, sq; |
179 | int i; |
180 | |
181 | seq_printf(m: s, fmt: "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n" ); |
182 | seq_printf(m: s, fmt: "-----------------------------------------------------------------------------------\n" ); |
183 | |
184 | for (i = 0; i < METRIC_MAX; i++) { |
185 | m = &cm->metric[i]; |
186 | spin_lock(lock: &m->lock); |
187 | total = m->total; |
188 | avg = m->latency_avg; |
189 | min = m->latency_min; |
190 | max = m->latency_max; |
191 | sq = m->latency_sq_sum; |
192 | spin_unlock(lock: &m->lock); |
193 | CEPH_LAT_METRIC_SHOW(metric_str[i], total, avg, min, max, sq); |
194 | } |
195 | |
196 | return 0; |
197 | } |
198 | |
199 | static int metrics_size_show(struct seq_file *s, void *p) |
200 | { |
201 | struct ceph_fs_client *fsc = s->private; |
202 | struct ceph_client_metric *cm = &fsc->mdsc->metric; |
203 | struct ceph_metric *m; |
204 | s64 total; |
205 | u64 sum, avg, min, max; |
206 | int i; |
207 | |
208 | seq_printf(m: s, fmt: "item total avg_sz(bytes) min_sz(bytes) max_sz(bytes) total_sz(bytes)\n" ); |
209 | seq_printf(m: s, fmt: "----------------------------------------------------------------------------------------\n" ); |
210 | |
211 | for (i = 0; i < METRIC_MAX; i++) { |
212 | /* skip 'metadata' as it doesn't use the size metric */ |
213 | if (i == METRIC_METADATA) |
214 | continue; |
215 | m = &cm->metric[i]; |
216 | spin_lock(lock: &m->lock); |
217 | total = m->total; |
218 | sum = m->size_sum; |
219 | avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0; |
220 | min = m->size_min; |
221 | max = m->size_max; |
222 | spin_unlock(lock: &m->lock); |
223 | CEPH_SZ_METRIC_SHOW(metric_str[i], total, avg, min, max, sum); |
224 | } |
225 | |
226 | return 0; |
227 | } |
228 | |
229 | static int metrics_caps_show(struct seq_file *s, void *p) |
230 | { |
231 | struct ceph_fs_client *fsc = s->private; |
232 | struct ceph_client_metric *m = &fsc->mdsc->metric; |
233 | int nr_caps = 0; |
234 | |
235 | seq_printf(m: s, fmt: "item total miss hit\n" ); |
236 | seq_printf(m: s, fmt: "-------------------------------------------------\n" ); |
237 | |
238 | seq_printf(m: s, fmt: "%-14s%-16lld%-16lld%lld\n" , "d_lease" , |
239 | atomic64_read(v: &m->total_dentries), |
240 | percpu_counter_sum(fbc: &m->d_lease_mis), |
241 | percpu_counter_sum(fbc: &m->d_lease_hit)); |
242 | |
243 | nr_caps = atomic64_read(v: &m->total_caps); |
244 | seq_printf(m: s, fmt: "%-14s%-16d%-16lld%lld\n" , "caps" , nr_caps, |
245 | percpu_counter_sum(fbc: &m->i_caps_mis), |
246 | percpu_counter_sum(fbc: &m->i_caps_hit)); |
247 | |
248 | return 0; |
249 | } |
250 | |
251 | static int caps_show_cb(struct inode *inode, int mds, void *p) |
252 | { |
253 | struct ceph_inode_info *ci = ceph_inode(inode); |
254 | struct seq_file *s = p; |
255 | struct ceph_cap *cap; |
256 | |
257 | spin_lock(lock: &ci->i_ceph_lock); |
258 | cap = __get_cap_for_mds(ci, mds); |
259 | if (cap) |
260 | seq_printf(m: s, fmt: "0x%-17llx%-3d%-17s%-17s\n" , ceph_ino(inode), |
261 | cap->session->s_mds, |
262 | ceph_cap_string(c: cap->issued), |
263 | ceph_cap_string(c: cap->implemented)); |
264 | spin_unlock(lock: &ci->i_ceph_lock); |
265 | return 0; |
266 | } |
267 | |
268 | static int caps_show(struct seq_file *s, void *p) |
269 | { |
270 | struct ceph_fs_client *fsc = s->private; |
271 | struct ceph_mds_client *mdsc = fsc->mdsc; |
272 | int total, avail, used, reserved, min, i; |
273 | struct cap_wait *cw; |
274 | |
275 | ceph_reservation_status(client: fsc, total: &total, avail: &avail, used: &used, reserved: &reserved, min: &min); |
276 | seq_printf(m: s, fmt: "total\t\t%d\n" |
277 | "avail\t\t%d\n" |
278 | "used\t\t%d\n" |
279 | "reserved\t%d\n" |
280 | "min\t\t%d\n\n" , |
281 | total, avail, used, reserved, min); |
282 | seq_printf(m: s, fmt: "ino mds issued implemented\n" ); |
283 | seq_printf(m: s, fmt: "--------------------------------------------------\n" ); |
284 | |
285 | mutex_lock(&mdsc->mutex); |
286 | for (i = 0; i < mdsc->max_sessions; i++) { |
287 | struct ceph_mds_session *session; |
288 | |
289 | session = __ceph_lookup_mds_session(mdsc, mds: i); |
290 | if (!session) |
291 | continue; |
292 | mutex_unlock(lock: &mdsc->mutex); |
293 | mutex_lock(&session->s_mutex); |
294 | ceph_iterate_session_caps(session, cb: caps_show_cb, arg: s); |
295 | mutex_unlock(lock: &session->s_mutex); |
296 | ceph_put_mds_session(s: session); |
297 | mutex_lock(&mdsc->mutex); |
298 | } |
299 | mutex_unlock(lock: &mdsc->mutex); |
300 | |
301 | seq_printf(m: s, fmt: "\n\nWaiters:\n--------\n" ); |
302 | seq_printf(m: s, fmt: "tgid ino need want\n" ); |
303 | seq_printf(m: s, fmt: "-----------------------------------------------------\n" ); |
304 | |
305 | spin_lock(lock: &mdsc->caps_list_lock); |
306 | list_for_each_entry(cw, &mdsc->cap_wait_list, list) { |
307 | seq_printf(m: s, fmt: "%-13d0x%-17llx%-17s%-17s\n" , cw->tgid, cw->ino, |
308 | ceph_cap_string(c: cw->need), |
309 | ceph_cap_string(c: cw->want)); |
310 | } |
311 | spin_unlock(lock: &mdsc->caps_list_lock); |
312 | |
313 | return 0; |
314 | } |
315 | |
316 | static int mds_sessions_show(struct seq_file *s, void *ptr) |
317 | { |
318 | struct ceph_fs_client *fsc = s->private; |
319 | struct ceph_mds_client *mdsc = fsc->mdsc; |
320 | struct ceph_auth_client *ac = fsc->client->monc.auth; |
321 | struct ceph_options *opt = fsc->client->options; |
322 | int mds; |
323 | |
324 | mutex_lock(&mdsc->mutex); |
325 | |
326 | /* The 'num' portion of an 'entity name' */ |
327 | seq_printf(m: s, fmt: "global_id %llu\n" , ac->global_id); |
328 | |
329 | /* The -o name mount argument */ |
330 | seq_printf(m: s, fmt: "name \"%s\"\n" , opt->name ? opt->name : "" ); |
331 | |
332 | /* The list of MDS session rank+state */ |
333 | for (mds = 0; mds < mdsc->max_sessions; mds++) { |
334 | struct ceph_mds_session *session = |
335 | __ceph_lookup_mds_session(mdsc, mds); |
336 | if (!session) { |
337 | continue; |
338 | } |
339 | mutex_unlock(lock: &mdsc->mutex); |
340 | seq_printf(m: s, fmt: "mds.%d %s\n" , |
341 | session->s_mds, |
342 | ceph_session_state_name(s: session->s_state)); |
343 | |
344 | ceph_put_mds_session(s: session); |
345 | mutex_lock(&mdsc->mutex); |
346 | } |
347 | mutex_unlock(lock: &mdsc->mutex); |
348 | |
349 | return 0; |
350 | } |
351 | |
352 | static int status_show(struct seq_file *s, void *p) |
353 | { |
354 | struct ceph_fs_client *fsc = s->private; |
355 | struct ceph_entity_inst *inst = &fsc->client->msgr.inst; |
356 | struct ceph_entity_addr *client_addr = ceph_client_addr(client: fsc->client); |
357 | |
358 | seq_printf(m: s, fmt: "instance: %s.%lld %s/%u\n" , ENTITY_NAME(inst->name), |
359 | ceph_pr_addr(addr: client_addr), le32_to_cpu(client_addr->nonce)); |
360 | seq_printf(m: s, fmt: "blocklisted: %s\n" , fsc->blocklisted ? "true" : "false" ); |
361 | |
362 | return 0; |
363 | } |
364 | |
/*
 * Generate the <name>_fops file_operations (open/read/llseek/release)
 * around each <name>_show() above.
 */
DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(metrics_file);
DEFINE_SHOW_ATTRIBUTE(metrics_latency);
DEFINE_SHOW_ATTRIBUTE(metrics_size);
DEFINE_SHOW_ATTRIBUTE(metrics_caps);
374 | |
375 | |
376 | /* |
377 | * debugfs |
378 | */ |
379 | static int congestion_kb_set(void *data, u64 val) |
380 | { |
381 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)data; |
382 | |
383 | fsc->mount_options->congestion_kb = (int)val; |
384 | return 0; |
385 | } |
386 | |
387 | static int congestion_kb_get(void *data, u64 *val) |
388 | { |
389 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)data; |
390 | |
391 | *val = (u64)fsc->mount_options->congestion_kb; |
392 | return 0; |
393 | } |
394 | |
/* Read/write debugfs attribute backed by the two handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
			congestion_kb_set, "%llu\n");
397 | |
398 | |
399 | void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc) |
400 | { |
401 | dout("ceph_fs_debugfs_cleanup\n" ); |
402 | debugfs_remove(dentry: fsc->debugfs_bdi); |
403 | debugfs_remove(dentry: fsc->debugfs_congestion_kb); |
404 | debugfs_remove(dentry: fsc->debugfs_mdsmap); |
405 | debugfs_remove(dentry: fsc->debugfs_mds_sessions); |
406 | debugfs_remove(dentry: fsc->debugfs_caps); |
407 | debugfs_remove(dentry: fsc->debugfs_status); |
408 | debugfs_remove(dentry: fsc->debugfs_mdsc); |
409 | debugfs_remove_recursive(dentry: fsc->debugfs_metrics_dir); |
410 | } |
411 | |
412 | void ceph_fs_debugfs_init(struct ceph_fs_client *fsc) |
413 | { |
414 | char name[100]; |
415 | |
416 | dout("ceph_fs_debugfs_init\n" ); |
417 | fsc->debugfs_congestion_kb = |
418 | debugfs_create_file(name: "writeback_congestion_kb" , |
419 | mode: 0600, |
420 | parent: fsc->client->debugfs_dir, |
421 | data: fsc, |
422 | fops: &congestion_kb_fops); |
423 | |
424 | snprintf(buf: name, size: sizeof(name), fmt: "../../bdi/%s" , |
425 | bdi_dev_name(bdi: fsc->sb->s_bdi)); |
426 | fsc->debugfs_bdi = |
427 | debugfs_create_symlink(name: "bdi" , |
428 | parent: fsc->client->debugfs_dir, |
429 | dest: name); |
430 | |
431 | fsc->debugfs_mdsmap = debugfs_create_file(name: "mdsmap" , |
432 | mode: 0400, |
433 | parent: fsc->client->debugfs_dir, |
434 | data: fsc, |
435 | fops: &mdsmap_fops); |
436 | |
437 | fsc->debugfs_mds_sessions = debugfs_create_file(name: "mds_sessions" , |
438 | mode: 0400, |
439 | parent: fsc->client->debugfs_dir, |
440 | data: fsc, |
441 | fops: &mds_sessions_fops); |
442 | |
443 | fsc->debugfs_mdsc = debugfs_create_file(name: "mdsc" , |
444 | mode: 0400, |
445 | parent: fsc->client->debugfs_dir, |
446 | data: fsc, |
447 | fops: &mdsc_fops); |
448 | |
449 | fsc->debugfs_caps = debugfs_create_file(name: "caps" , |
450 | mode: 0400, |
451 | parent: fsc->client->debugfs_dir, |
452 | data: fsc, |
453 | fops: &caps_fops); |
454 | |
455 | fsc->debugfs_status = debugfs_create_file(name: "status" , |
456 | mode: 0400, |
457 | parent: fsc->client->debugfs_dir, |
458 | data: fsc, |
459 | fops: &status_fops); |
460 | |
461 | fsc->debugfs_metrics_dir = debugfs_create_dir(name: "metrics" , |
462 | parent: fsc->client->debugfs_dir); |
463 | |
464 | debugfs_create_file(name: "file" , mode: 0400, parent: fsc->debugfs_metrics_dir, data: fsc, |
465 | fops: &metrics_file_fops); |
466 | debugfs_create_file(name: "latency" , mode: 0400, parent: fsc->debugfs_metrics_dir, data: fsc, |
467 | fops: &metrics_latency_fops); |
468 | debugfs_create_file(name: "size" , mode: 0400, parent: fsc->debugfs_metrics_dir, data: fsc, |
469 | fops: &metrics_size_fops); |
470 | debugfs_create_file(name: "caps" , mode: 0400, parent: fsc->debugfs_metrics_dir, data: fsc, |
471 | fops: &metrics_caps_fops); |
472 | } |
473 | |
474 | |
475 | #else /* CONFIG_DEBUG_FS */ |
476 | |
/* No-op stub when the kernel is built without CONFIG_DEBUG_FS. */
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
}
480 | |
/* No-op stub when the kernel is built without CONFIG_DEBUG_FS. */
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
}
484 | |
485 | #endif /* CONFIG_DEBUG_FS */ |
486 | |