// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_CHARDEV

#include "bcachefs.h"
#include "bcachefs_ioctl.h"
#include "buckets.h"
#include "chardev.h"
#include "journal.h"
#include "move.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "thread_with_file.h"

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/major.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* returns with ref on ca->ref */
static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
					  unsigned flags)
{
	struct bch_dev *ca;

	if (flags & BCH_BY_INDEX) {
		if (dev >= c->sb.nr_devices)
			return ERR_PTR(-EINVAL);

		rcu_read_lock();
		ca = rcu_dereference(c->devs[dev]);
		if (ca)
			percpu_ref_get(&ca->ref);
		rcu_read_unlock();

		if (!ca)
			return ERR_PTR(-EINVAL);
	} else {
		char *path;

		path = strndup_user((const char __user *)
				    (unsigned long) dev, PATH_MAX);
		if (IS_ERR(path))
			return ERR_CAST(path);

		ca = bch2_dev_lookup(c, path);
		kfree(path);
	}

	return ca;
}

#if 0
static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
{
	struct bch_ioctl_assemble arg;
	struct bch_fs *c;
	u64 *user_devs = NULL;
	char **devs = NULL;
	unsigned i;
	int ret = -EFAULT;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
	if (!user_devs)
		return -ENOMEM;

	devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
	if (!devs) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(user_devs, user_arg->devs,
			   sizeof(u64) * arg.nr_devs))
		goto err;

	for (i = 0; i < arg.nr_devs; i++) {
		devs[i] = strndup_user((const char __user *)(unsigned long)
				       user_devs[i],
				       PATH_MAX);
		ret = PTR_ERR_OR_ZERO(devs[i]);
		if (ret) {
			devs[i] = NULL;
			goto err;
		}
	}

	c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
	ret = PTR_ERR_OR_ZERO(c);
	if (!ret)
		closure_put(&c->cl);
err:
	if (devs)
		for (i = 0; i < arg.nr_devs; i++)
			kfree(devs[i]);
	kfree(devs);
	kfree(user_devs);
	return ret;
}

static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
{
	struct bch_ioctl_incremental arg;
	const char *err;
	char *path;
	int ret;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	err = bch2_fs_open_incremental(path);
	kfree(path);

	if (err) {
		pr_err("Could not register bcachefs devices: %s", err);
		return -EINVAL;
	}

	return 0;
}
#endif

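/*
 * State shared between the fsck ioctls and their worker thread: the
 * filesystem being checked, the thread_with_stdio used to talk to the
 * calling process, and the options parsed from the ioctl argument.
 */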
struct fsck_thread {
	struct thread_with_stdio thr;
	struct bch_fs *c;
	struct bch_opts opts;
};

static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
	struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
	kfree(thr);
}

static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	int ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		return ret;

	ret = bch2_fs_start(thr->c);
	if (ret)
		goto err;

	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
		ret |= 1;
	}
	if (test_bit(BCH_FS_error, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
		ret |= 4;
	}
err:
	bch2_fs_stop(c);
	return ret;
}

static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
	.exit	= bch2_fsck_thread_exit,
	.fn	= bch2_fsck_offline_thread_fn,
};

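/*
 * BCH_IOCTL_FSCK_OFFLINE: copy an array of device path strings in from
 * userspace and open them as a filesystem with nostart set; the actual
 * bch2_fs_start() (and any fsck it implies) then runs from a kthread whose
 * stdio is routed back to the calling process.
 */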
static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
{
	struct bch_ioctl_fsck_offline arg;
	struct fsck_thread *thr = NULL;
	darray_str(devs) = {};
	long ret = 0;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (size_t i = 0; i < arg.nr_devs; i++) {
		u64 dev_u64;
		ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
		if (ret)
			goto err;

		char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
		ret = PTR_ERR_OR_ZERO(dev_str);
		if (ret)
			goto err;

		ret = darray_push(&devs, dev_str);
		if (ret) {
			kfree(dev_str);
			goto err;
		}
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret = PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(NULL, &thr->opts, optstr);
		kfree(optstr);

		if (ret)
			goto err;
	}

	opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);

	/* We need request_key() to be called before we punt to kthread: */
	opt_set(thr->opts, nostart, true);

	bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);

	thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);

	if (!IS_ERR(thr->c) &&
	    thr->c->opts.errors == BCH_ON_ERROR_panic)
		thr->c->opts.errors = BCH_ON_ERROR_ro;

	ret = __bch2_run_thread_with_stdio(&thr->thr);
out:
	darray_for_each(devs, i)
		kfree(*i);
	darray_exit(&devs);
	return ret;
err:
	if (thr)
		bch2_fsck_thread_exit(&thr->thr);
	pr_err("ret %s", bch2_err_str(ret));
	goto out;
}

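/*
 * ioctls on the global control device, i.e. ones not tied to an open
 * filesystem; with the assemble/incremental calls compiled out this is
 * currently just BCH_IOCTL_FSCK_OFFLINE.
 */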
static long bch2_global_ioctl(unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
#if 0
	case BCH_IOCTL_ASSEMBLE:
		return bch2_ioctl_assemble(arg);
	case BCH_IOCTL_INCREMENTAL:
		return bch2_ioctl_incremental(arg);
#endif
	case BCH_IOCTL_FSCK_OFFLINE: {
		ret = bch2_ioctl_fsck_offline(arg);
		break;
	}
	default:
		ret = -ENOTTY;
		break;
	}

	if (ret < 0)
		ret = bch2_err_class(ret);
	return ret;
}

static long bch2_ioctl_query_uuid(struct bch_fs *c,
				  struct bch_ioctl_query_uuid __user *user_arg)
{
	return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
				    sizeof(c->sb.user_uuid));
}

#if 0
static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	return bch2_fs_start(c);
}

static long bch2_ioctl_stop(struct bch_fs *c)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	bch2_fs_stop(c);
	return 0;
}
#endif

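/*
 * Device management ioctls. DISK_ADD and DISK_ONLINE take a userspace path
 * in arg.dev; DISK_REMOVE, DISK_OFFLINE and DISK_SET_STATE go through
 * bch2_device_lookup(), so arg.dev is either a path pointer or, with
 * BCH_BY_INDEX, a device index.
 */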
static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	char *path;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	ret = bch2_dev_add(c, path);
	kfree(path);

	return ret;
}

static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	struct bch_dev *ca;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	return bch2_dev_remove(c, ca, arg.flags);
}

static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	char *path;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	ret = bch2_dev_online(c, path);
	kfree(path);
	return ret;
}

static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_offline(c, ca, arg.flags);
	percpu_ref_put(&ca->ref);
	return ret;
}

static long bch2_ioctl_disk_set_state(struct bch_fs *c,
				      struct bch_ioctl_disk_set_state arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad[0] || arg.pad[1] || arg.pad[2] ||
	    arg.new_state >= BCH_MEMBER_STATE_NR)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
	if (ret)
		bch_err(c, "Error setting device state: %s", bch2_err_str(ret));

	percpu_ref_put(&ca->ref);
	return ret;
}

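/*
 * BCH_IOCTL_DATA runs a data job (BCH_DATA_OP_*) in a kthread and returns a
 * file descriptor to the caller: read() reports progress as a
 * bch_ioctl_data_event, and closing the fd tears the worker thread down.
 */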
struct bch_data_ctx {
	struct thread_with_file thr;

	struct bch_fs *c;
	struct bch_ioctl_data arg;
	struct bch_move_stats stats;
};

static int bch2_data_thread(void *arg)
{
	struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);

	ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
	ctx->stats.data_type = U8_MAX;
	return 0;
}

static int bch2_data_job_release(struct inode *inode, struct file *file)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);

	bch2_thread_with_file_exit(&ctx->thr);
	kfree(ctx);
	return 0;
}

static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
	struct bch_fs *c = ctx->c;
	struct bch_ioctl_data_event e = {
		.type = BCH_DATA_EVENT_PROGRESS,
		.p.data_type = ctx->stats.data_type,
		.p.btree_id = ctx->stats.pos.btree,
		.p.pos = ctx->stats.pos.pos,
		.p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
		.p.sectors_total = bch2_fs_usage_read_short(c).used,
	};

	if (len < sizeof(e))
		return -EINVAL;

	return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
}

static const struct file_operations bcachefs_data_ops = {
	.release	= bch2_data_job_release,
	.read		= bch2_data_job_read,
	.llseek		= no_llseek,
};

static long bch2_ioctl_data(struct bch_fs *c,
			    struct bch_ioctl_data arg)
{
	struct bch_data_ctx *ctx;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.op >= BCH_DATA_OP_NR || arg.flags)
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->c = c;
	ctx->arg = arg;

	ret = bch2_run_thread_with_file(&ctx->thr,
					&bcachefs_data_ops,
					bch2_data_thread);
	if (ret < 0)
		kfree(ctx);
	return ret;
}

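/*
 * BCH_IOCTL_FS_USAGE: copy out filesystem-wide usage, including one
 * bch_replicas_usage entry per entry in the replicas table; userspace sizes
 * the trailing buffer via replica_entries_bytes and gets -ERANGE back if it
 * was too small.
 */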
static long bch2_ioctl_fs_usage(struct bch_fs *c,
				struct bch_ioctl_fs_usage __user *user_arg)
{
	struct bch_ioctl_fs_usage *arg = NULL;
	struct bch_replicas_usage *dst_e, *dst_end;
	struct bch_fs_usage_online *src;
	u32 replica_entries_bytes;
	unsigned i;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
		return -EFAULT;

	arg = kzalloc(size_add(sizeof(*arg), replica_entries_bytes), GFP_KERNEL);
	if (!arg)
		return -ENOMEM;

	src = bch2_fs_usage_read(c);
	if (!src) {
		ret = -ENOMEM;
		goto err;
	}

	arg->capacity = c->capacity;
	arg->used = bch2_fs_sectors_used(c, src);
	arg->online_reserved = src->online_reserved;

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		arg->persistent_reserved[i] = src->u.persistent_reserved[i];

	dst_e = arg->replicas;
	dst_end = (void *) arg->replicas + replica_entries_bytes;

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *src_e =
			cpu_replicas_entry(&c->replicas, i);

		/* check that we have enough space for one replicas entry */
		if (dst_e + 1 > dst_end) {
			ret = -ERANGE;
			break;
		}

		dst_e->sectors = src->u.replicas[i];
		dst_e->r = *src_e;

		/* recheck after setting nr_devs: */
		if (replicas_usage_next(dst_e) > dst_end) {
			ret = -ERANGE;
			break;
		}

		memcpy(dst_e->r.devs, src_e->devs, src_e->nr_devs);

		dst_e = replicas_usage_next(dst_e);
	}

	arg->replica_entries_bytes = (void *) dst_e - (void *) arg->replicas;

	percpu_up_read(&c->mark_lock);
	kfree(src);

	if (ret)
		goto err;

	ret = copy_to_user_errcode(user_arg, arg,
				   sizeof(*arg) + arg->replica_entries_bytes);
err:
	kfree(arg);
	return ret;
}

/* obsolete, didn't allow for new data types: */
static long bch2_ioctl_dev_usage(struct bch_fs *c,
				 struct bch_ioctl_dev_usage __user *user_arg)
{
	struct bch_ioctl_dev_usage arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	unsigned i;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state = ca->mi.state;
	arg.bucket_size = ca->mi.bucket_size;
	arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;

	for (i = 0; i < BCH_DATA_NR; i++) {
		arg.d[i].buckets = src.d[i].buckets;
		arg.d[i].sectors = src.d[i].sectors;
		arg.d[i].fragmented = src.d[i].fragmented;
	}

	percpu_ref_put(&ca->ref);

	return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}

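/*
 * BCH_IOCTL_DEV_USAGE_V2: same idea as the obsolete version above, but the
 * per-data-type array is sized by the caller (nr_data_types), so new data
 * types can be reported without breaking the ABI.
 */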
static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
				    struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
	struct bch_ioctl_dev_usage_v2 arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state = ca->mi.state;
	arg.bucket_size = ca->mi.bucket_size;
	arg.nr_data_types = min(arg.nr_data_types, BCH_DATA_NR);
	arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
	if (ret)
		goto err;

	for (unsigned i = 0; i < arg.nr_data_types; i++) {
		struct bch_ioctl_dev_usage_type t = {
			.buckets = src.d[i].buckets,
			.sectors = src.d[i].sectors,
			.fragmented = src.d[i].fragmented,
		};

		ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
		if (ret)
			goto err;
	}
err:
	percpu_ref_put(&ca->ref);
	return ret;
}

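/*
 * BCH_IOCTL_READ_SUPER: copy the filesystem superblock (or, with
 * BCH_READ_DEV, the superblock of one member device) into a userspace
 * buffer; returns -ERANGE if the buffer is too small.
 */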
static long bch2_ioctl_read_super(struct bch_fs *c,
				  struct bch_ioctl_read_super arg)
{
	struct bch_dev *ca = NULL;
	struct bch_sb *sb;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
	    arg.pad)
		return -EINVAL;

	mutex_lock(&c->sb_lock);

	if (arg.flags & BCH_READ_DEV) {
		ca = bch2_device_lookup(c, arg.dev, arg.flags);

		if (IS_ERR(ca)) {
			ret = PTR_ERR(ca);
			goto err;
		}

		sb = ca->disk_sb.sb;
	} else {
		sb = c->disk_sb.sb;
	}

	if (vstruct_bytes(sb) > arg.size) {
		ret = -ERANGE;
		goto err;
	}

	ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
				   vstruct_bytes(sb));
err:
	if (!IS_ERR_OR_NULL(ca))
		percpu_ref_put(&ca->ref);
	mutex_unlock(&c->sb_lock);
	return ret;
}

static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
				    struct bch_ioctl_disk_get_idx arg)
{
	dev_t dev = huge_decode_dev(arg.dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	for_each_online_member(c, ca)
		if (ca->dev == dev) {
			percpu_ref_put(&ca->io_ref);
			return ca->dev_idx;
		}

	return -BCH_ERR_ENOENT_dev_idx_not_found;
}

static long bch2_ioctl_disk_resize(struct bch_fs *c,
				   struct bch_ioctl_disk_resize arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_resize(c, ca, arg.nbuckets);

	percpu_ref_put(&ca->ref);
	return ret;
}

static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
					   struct bch_ioctl_disk_resize_journal arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	if (arg.nbuckets > U32_MAX)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);

	percpu_ref_put(&ca->ref);
	return ret;
}

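/*
 * Online fsck worker: points the filesystem's stdio at this thread, forces
 * fsck mode with the requested fix_errors policy, then sets the recovery
 * pass cursor to check_alloc_info and runs the online recovery passes.
 */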
static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	c->stdio_filter = current;
	c->stdio = &thr->thr.stdio;

	/*
	 * XXX: can we figure out a way to do this without mucking with c->opts?
	 */
	unsigned old_fix_errors = c->opts.fix_errors;
	if (opt_defined(thr->opts, fix_errors))
		c->opts.fix_errors = thr->opts.fix_errors;
	else
		c->opts.fix_errors = FSCK_FIX_ask;

	c->opts.fsck = true;
	set_bit(BCH_FS_fsck_running, &c->flags);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
	int ret = bch2_run_online_recovery_passes(c);

	clear_bit(BCH_FS_fsck_running, &c->flags);
	bch_err_fn(c, ret);

	c->stdio = NULL;
	c->stdio_filter = NULL;
	c->opts.fix_errors = old_fix_errors;

	up(&c->online_fsck_mutex);
	bch2_ro_ref_put(c);
	return ret;
}

static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
	.exit	= bch2_fsck_thread_exit,
	.fn	= bch2_fsck_online_thread_fn,
};

static long bch2_ioctl_fsck_online(struct bch_fs *c,
				   struct bch_ioctl_fsck_online arg)
{
	struct fsck_thread *thr = NULL;
	long ret = 0;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!bch2_ro_ref_tryget(c))
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->c = c;
	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret = PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(c, &thr->opts, optstr);
		kfree(optstr);

		if (ret)
			goto err;
	}

	ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
err:
	if (ret < 0) {
		bch_err_fn(c, ret);
		if (thr)
			bch2_fsck_thread_exit(&thr->thr);
		up(&c->online_fsck_mutex);
		bch2_ro_ref_put(c);
	}
	return ret;
}

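/*
 * Dispatch helper: copy the ioctl argument struct in from userspace, call
 * the matching bch2_ioctl_* handler, then jump to the common error class
 * conversion at the 'out' label in bch2_fs_ioctl().
 */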
#define BCH_IOCTL(_name, _argtype)					\
do {									\
	_argtype i;							\
									\
	if (copy_from_user(&i, arg, sizeof(i)))				\
		return -EFAULT;						\
	ret = bch2_ioctl_##_name(c, i);					\
	goto out;							\
} while (0)

long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
	case BCH_IOCTL_QUERY_UUID:
		return bch2_ioctl_query_uuid(c, arg);
	case BCH_IOCTL_FS_USAGE:
		return bch2_ioctl_fs_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE:
		return bch2_ioctl_dev_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE_V2:
		return bch2_ioctl_dev_usage_v2(c, arg);
#if 0
	case BCH_IOCTL_START:
		BCH_IOCTL(start, struct bch_ioctl_start);
	case BCH_IOCTL_STOP:
		return bch2_ioctl_stop(c);
#endif
	case BCH_IOCTL_READ_SUPER:
		BCH_IOCTL(read_super, struct bch_ioctl_read_super);
	case BCH_IOCTL_DISK_GET_IDX:
		BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
	}

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	switch (cmd) {
	case BCH_IOCTL_DISK_ADD:
		BCH_IOCTL(disk_add, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_REMOVE:
		BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_ONLINE:
		BCH_IOCTL(disk_online, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_OFFLINE:
		BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_SET_STATE:
		BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
	case BCH_IOCTL_DATA:
		BCH_IOCTL(data, struct bch_ioctl_data);
	case BCH_IOCTL_DISK_RESIZE:
		BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
	case BCH_IOCTL_DISK_RESIZE_JOURNAL:
		BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
	case BCH_IOCTL_FSCK_ONLINE:
		BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
	default:
		return -ENOTTY;
	}
out:
	if (ret < 0)
		ret = bch2_err_class(ret);
	return ret;
}

static DEFINE_IDR(bch_chardev_minor);

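/*
 * ioctl entry point for the character devices: minors below U8_MAX map to a
 * specific filesystem via bch_chardev_minor, while the minor U8_MAX
 * "bcachefs-ctl" device has no filesystem and takes the global ioctls.
 */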
static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
{
	unsigned minor = iminor(file_inode(filp));
	struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
	void __user *arg = (void __user *) v;

	return c
		? bch2_fs_ioctl(c, cmd, arg)
		: bch2_global_ioctl(cmd, arg);
}

static const struct file_operations bch_chardev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = bch2_chardev_ioctl,
	.open		= nonseekable_open,
};

static int bch_chardev_major;
static struct class *bch_chardev_class;
static struct device *bch_chardev;

void bch2_fs_chardev_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->chardev))
		device_unregister(c->chardev);
	if (c->minor >= 0)
		idr_remove(&bch_chardev_minor, c->minor);
}

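/*
 * Per-filesystem control node: allocate a minor from the shared IDR and
 * create the matching "bcachefs%u-ctl" device.
 */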
int bch2_fs_chardev_init(struct bch_fs *c)
{
	c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
	if (c->minor < 0)
		return c->minor;

	c->chardev = device_create(bch_chardev_class, NULL,
				   MKDEV(bch_chardev_major, c->minor), c,
				   "bcachefs%u-ctl", c->minor);
	if (IS_ERR(c->chardev))
		return PTR_ERR(c->chardev);

	return 0;
}

void bch2_chardev_exit(void)
{
	if (!IS_ERR_OR_NULL(bch_chardev_class))
		device_destroy(bch_chardev_class,
			       MKDEV(bch_chardev_major, U8_MAX));
	if (!IS_ERR_OR_NULL(bch_chardev_class))
		class_destroy(bch_chardev_class);
	if (bch_chardev_major > 0)
		unregister_chrdev(bch_chardev_major, "bcachefs");
}

int __init bch2_chardev_init(void)
{
	bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
	if (bch_chardev_major < 0)
		return bch_chardev_major;

	bch_chardev_class = class_create("bcachefs");
	if (IS_ERR(bch_chardev_class))
		return PTR_ERR(bch_chardev_class);

	bch_chardev = device_create(bch_chardev_class, NULL,
				    MKDEV(bch_chardev_major, U8_MAX),
				    NULL, "bcachefs-ctl");
	if (IS_ERR(bch_chardev))
		return PTR_ERR(bch_chardev);

	return 0;
}

#endif /* NO_BCACHEFS_CHARDEV */
