// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "quota.h"
#include "snapshot.h"
#include "super-io.h"

static const char * const bch2_quota_types[] = {
        "user",
        "group",
        "project",
};

static const char * const bch2_quota_counters[] = {
        "space",
        "inodes",
};

static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
                                  struct printbuf *err)
{
        struct bch_sb_field_quota *q = field_to_type(f, quota);

        if (vstruct_bytes(&q->field) < sizeof(*q)) {
                prt_printf(err, "wrong size (got %zu should be %zu)",
                           vstruct_bytes(&q->field), sizeof(*q));
                return -BCH_ERR_invalid_sb_quota;
        }

        return 0;
}

static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
                                  struct bch_sb_field *f)
{
        struct bch_sb_field_quota *q = field_to_type(f, quota);
        unsigned qtyp, counter;

        for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
                prt_printf(out, "%s: flags %llx",
                           bch2_quota_types[qtyp],
                           le64_to_cpu(q->q[qtyp].flags));

                for (counter = 0; counter < Q_COUNTERS; counter++)
                        prt_printf(out, " %s timelimit %u warnlimit %u",
                                   bch2_quota_counters[counter],
                                   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
                                   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));

                prt_newline(out);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_quota = {
        .validate       = bch2_sb_quota_validate,
        .to_text        = bch2_sb_quota_to_text,
};

int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k,
                       enum bkey_invalid_flags flags,
                       struct printbuf *err)
{
        int ret = 0;

        bkey_fsck_err_on(k.k->p.inode >= QTYP_NR, c, err,
                         quota_type_invalid,
                         "invalid quota type (%llu >= %u)",
                         k.k->p.inode, QTYP_NR);
fsck_err:
        return ret;
}

void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
                        struct bkey_s_c k)
{
        struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
        unsigned i;

        for (i = 0; i < Q_COUNTERS; i++)
                prt_printf(out, "%s hardlimit %llu softlimit %llu",
                           bch2_quota_counters[i],
                           le64_to_cpu(dq.v->c[i].hardlimit),
                           le64_to_cpu(dq.v->c[i].softlimit));
}

#ifdef CONFIG_BCACHEFS_QUOTA

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/quota.h>

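/*
 * Debug helpers: dump the generic VFS quota control structures
 * (struct qc_info / struct qc_dqblk) into a printbuf, one
 * "name<tab>value" pair per line using a 20-column tabstop. Only used
 * by the normally compiled-out debug output in bch2_quota_set_info()
 * and bch2_set_quota() below.
 */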
static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
{
        printbuf_tabstops_reset(out);
        printbuf_tabstop_push(out, 20);

        prt_str(out, "i_fieldmask");
        prt_tab(out);
        prt_printf(out, "%x", i->i_fieldmask);
        prt_newline(out);

        prt_str(out, "i_flags");
        prt_tab(out);
        prt_printf(out, "%u", i->i_flags);
        prt_newline(out);

        prt_str(out, "i_spc_timelimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_spc_timelimit);
        prt_newline(out);

        prt_str(out, "i_ino_timelimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_ino_timelimit);
        prt_newline(out);

        prt_str(out, "i_rt_spc_timelimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_rt_spc_timelimit);
        prt_newline(out);

        prt_str(out, "i_spc_warnlimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_spc_warnlimit);
        prt_newline(out);

        prt_str(out, "i_ino_warnlimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_ino_warnlimit);
        prt_newline(out);

        prt_str(out, "i_rt_spc_warnlimit");
        prt_tab(out);
        prt_printf(out, "%u", i->i_rt_spc_warnlimit);
        prt_newline(out);
}

static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
{
        printbuf_tabstops_reset(out);
        printbuf_tabstop_push(out, 20);

        prt_str(out, "d_fieldmask");
        prt_tab(out);
        prt_printf(out, "%x", q->d_fieldmask);
        prt_newline(out);

        prt_str(out, "d_spc_hardlimit");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_spc_hardlimit);
        prt_newline(out);

        prt_str(out, "d_spc_softlimit");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_spc_softlimit);
        prt_newline(out);

        prt_str(out, "d_ino_hardlimit");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_ino_hardlimit);
        prt_newline(out);

        prt_str(out, "d_ino_softlimit");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_ino_softlimit);
        prt_newline(out);

        prt_str(out, "d_space");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_space);
        prt_newline(out);

        prt_str(out, "d_ino_count");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_ino_count);
        prt_newline(out);

        prt_str(out, "d_ino_timer");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_ino_timer);
        prt_newline(out);

        prt_str(out, "d_spc_timer");
        prt_tab(out);
        prt_printf(out, "%llu", q->d_spc_timer);
        prt_newline(out);

        prt_str(out, "d_ino_warns");
        prt_tab(out);
        prt_printf(out, "%i", q->d_ino_warns);
        prt_newline(out);

        prt_str(out, "d_spc_warns");
        prt_tab(out);
        prt_printf(out, "%i", q->d_spc_warns);
        prt_newline(out);
}

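/*
 * Iterate over the quota types whose bit is set in _qtypes, setting _i
 * to the type index and _q to the corresponding in-memory state.
 * __next_qtype() returns the next set bit at or above i, or QTYP_NR
 * once the mask is exhausted; e.g. with
 * qtypes == BIT(QTYP_USR)|BIT(QTYP_PRJ) the loop body runs for
 * QTYP_USR and QTYP_PRJ only.
 */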
static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
{
        qtypes >>= i;
        return qtypes ? i + __ffs(qtypes) : QTYP_NR;
}

#define for_each_set_qtype(_c, _i, _q, _qtypes)                 \
        for (_i = 0;                                            \
             (_i = __next_qtype(_i, _qtypes),                   \
              _q = &(_c)->quotas[_i],                           \
              _i < QTYP_NR);                                    \
             _i++)

static bool ignore_hardlimit(struct bch_memquota_type *q)
{
        if (capable(CAP_SYS_RESOURCE))
                return true;
#if 0
        struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

        return capable(CAP_SYS_RESOURCE) &&
               (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
                !(info->dqi_flags & DQF_ROOT_SQUASH));
#endif
        return false;
}

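/*
 * Internal warning conditions and their mapping to the QUOTA_NL_*
 * codes that quota_send_warning() reports to userspace via the quota
 * netlink interface; quota_nl[] is indexed by [quota_msg][counter].
 */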
enum quota_msg {
        SOFTWARN,       /* Softlimit reached */
        SOFTLONGWARN,   /* Grace time expired */
        HARDWARN,       /* Hardlimit reached */

        HARDBELOW,      /* Usage got below inode hardlimit */
        SOFTBELOW,      /* Usage got below inode softlimit */
};

static int quota_nl[][Q_COUNTERS] = {
        [HARDWARN][Q_SPC]       = QUOTA_NL_BHARDWARN,
        [SOFTLONGWARN][Q_SPC]   = QUOTA_NL_BSOFTLONGWARN,
        [SOFTWARN][Q_SPC]       = QUOTA_NL_BSOFTWARN,
        [HARDBELOW][Q_SPC]      = QUOTA_NL_BHARDBELOW,
        [SOFTBELOW][Q_SPC]      = QUOTA_NL_BSOFTBELOW,

        [HARDWARN][Q_INO]       = QUOTA_NL_IHARDWARN,
        [SOFTLONGWARN][Q_INO]   = QUOTA_NL_ISOFTLONGWARN,
        [SOFTWARN][Q_INO]       = QUOTA_NL_ISOFTWARN,
        [HARDBELOW][Q_INO]      = QUOTA_NL_IHARDBELOW,
        [SOFTBELOW][Q_INO]      = QUOTA_NL_ISOFTBELOW,
};

struct quota_msgs {
        u8 nr;
        struct {
                u8 qtype;
                u8 msg;
        } m[QTYP_NR * Q_COUNTERS];
};

static void prepare_msg(unsigned qtype,
                        enum quota_counters counter,
                        struct quota_msgs *msgs,
                        enum quota_msg msg_type)
{
        BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));

        msgs->m[msgs->nr].qtype = qtype;
        msgs->m[msgs->nr].msg   = quota_nl[msg_type][counter];
        msgs->nr++;
}

static void prepare_warning(struct memquota_counter *qc,
                            unsigned qtype,
                            enum quota_counters counter,
                            struct quota_msgs *msgs,
                            enum quota_msg msg_type)
{
        if (qc->warning_issued & (1 << msg_type))
                return;

        prepare_msg(qtype, counter, msgs, msg_type);
}

static void flush_warnings(struct bch_qid qid,
                           struct super_block *sb,
                           struct quota_msgs *msgs)
{
        unsigned i;

        for (i = 0; i < msgs->nr; i++)
                quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
                                   sb->s_dev, msgs->m[i].msg);
}

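/*
 * Check a proposed change of @v to @counter against the limits for
 * quota type @qtype:
 *
 * - KEY_TYPE_QUOTA_NOCHECK skips enforcement entirely.
 * - Releases (v <= 0) always succeed, and clear previously issued
 *   warnings once usage drops back below the limits.
 * - Allocations that would exceed the hardlimit fail with -EDQUOT
 *   (unless the caller has CAP_SYS_RESOURCE); exceeding the softlimit
 *   starts the grace timer and only fails once that timer has expired.
 *
 * Warnings are queued in @msgs and sent after the quota locks have
 * been dropped.
 */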
static int bch2_quota_check_limit(struct bch_fs *c,
                                  unsigned qtype,
                                  struct bch_memquota *mq,
                                  struct quota_msgs *msgs,
                                  enum quota_counters counter,
                                  s64 v,
                                  enum quota_acct_mode mode)
{
        struct bch_memquota_type *q = &c->quotas[qtype];
        struct memquota_counter *qc = &mq->c[counter];
        u64 n = qc->v + v;

        BUG_ON((s64) n < 0);

        if (mode == KEY_TYPE_QUOTA_NOCHECK)
                return 0;

        if (v <= 0) {
                if (n < qc->hardlimit &&
                    (qc->warning_issued & (1 << HARDWARN))) {
                        qc->warning_issued &= ~(1 << HARDWARN);
                        prepare_msg(qtype, counter, msgs, HARDBELOW);
                }

                if (n < qc->softlimit &&
                    (qc->warning_issued & (1 << SOFTWARN))) {
                        qc->warning_issued &= ~(1 << SOFTWARN);
                        prepare_msg(qtype, counter, msgs, SOFTBELOW);
                }

                qc->warning_issued = 0;
                return 0;
        }

        if (qc->hardlimit &&
            qc->hardlimit < n &&
            !ignore_hardlimit(q)) {
                prepare_warning(qc, qtype, counter, msgs, HARDWARN);
                return -EDQUOT;
        }

        if (qc->softlimit &&
            qc->softlimit < n) {
                if (qc->timer == 0) {
                        qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
                        prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
                } else if (ktime_get_real_seconds() >= qc->timer &&
                           !ignore_hardlimit(q)) {
                        prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
                        return -EDQUOT;
                }
        }

        return 0;
}

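/*
 * Account a change of @v to @counter (sectors or inodes) against every
 * quota type enabled for @qid. The in-memory entries are allocated up
 * front so the locked section cannot fail on allocation; the per-type
 * locks are taken in qtype order via mutex_lock_nested(). If any type
 * fails its limit check, the change is applied to none of them.
 */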
int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
                    enum quota_counters counter, s64 v,
                    enum quota_acct_mode mode)
{
        unsigned qtypes = enabled_qtypes(c);
        struct bch_memquota_type *q;
        struct bch_memquota *mq[QTYP_NR];
        struct quota_msgs msgs;
        unsigned i;
        int ret = 0;

        memset(&msgs, 0, sizeof(msgs));

        for_each_set_qtype(c, i, q, qtypes) {
                mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
                if (!mq[i])
                        return -ENOMEM;
        }

        for_each_set_qtype(c, i, q, qtypes)
                mutex_lock_nested(&q->lock, i);

        for_each_set_qtype(c, i, q, qtypes) {
                ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
                if (ret)
                        goto err;
        }

        for_each_set_qtype(c, i, q, qtypes)
                mq[i]->c[counter].v += v;
err:
        for_each_set_qtype(c, i, q, qtypes)
                mutex_unlock(&q->lock);

        flush_warnings(qid, c->vfs_sb, &msgs);

        return ret;
}

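/*
 * Move @space sectors and one inode from @src to @dst for the given
 * quota types, e.g. when an inode changes owner, group or project.
 * The destination is checked against its limits before anything is
 * moved, so the transfer is all-or-nothing.
 */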
static void __bch2_quota_transfer(struct bch_memquota *src_q,
                                  struct bch_memquota *dst_q,
                                  enum quota_counters counter, s64 v)
{
        BUG_ON(v > src_q->c[counter].v);
        BUG_ON(v + dst_q->c[counter].v < v);

        src_q->c[counter].v -= v;
        dst_q->c[counter].v += v;
}

int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
                        struct bch_qid dst,
                        struct bch_qid src, u64 space,
                        enum quota_acct_mode mode)
{
        struct bch_memquota_type *q;
        struct bch_memquota *src_q[3], *dst_q[3];
        struct quota_msgs msgs;
        unsigned i;
        int ret = 0;

        qtypes &= enabled_qtypes(c);

        memset(&msgs, 0, sizeof(msgs));

        for_each_set_qtype(c, i, q, qtypes) {
                src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
                dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
                if (!src_q[i] || !dst_q[i])
                        return -ENOMEM;
        }

        for_each_set_qtype(c, i, q, qtypes)
                mutex_lock_nested(&q->lock, i);

        for_each_set_qtype(c, i, q, qtypes) {
                ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
                                             dst_q[i]->c[Q_SPC].v + space,
                                             mode);
                if (ret)
                        goto err;

                ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
                                             dst_q[i]->c[Q_INO].v + 1,
                                             mode);
                if (ret)
                        goto err;
        }

        for_each_set_qtype(c, i, q, qtypes) {
                __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
                __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
        }

err:
        for_each_set_qtype(c, i, q, qtypes)
                mutex_unlock(&q->lock);

        flush_warnings(dst, c->vfs_sb, &msgs);

        return ret;
}

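/*
 * Mirror an on-disk quota key into the in-memory state: copy the hard
 * and soft limits from a KEY_TYPE_quota key and, when called from the
 * quotactl set path, the timer and warning fields selected by
 * qdq->d_fieldmask.
 */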
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
                            struct qc_dqblk *qdq)
{
        struct bkey_s_c_quota dq;
        struct bch_memquota_type *q;
        struct bch_memquota *mq;
        unsigned i;

        BUG_ON(k.k->p.inode >= QTYP_NR);

        if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
                return 0;

        switch (k.k->type) {
        case KEY_TYPE_quota:
                dq = bkey_s_c_to_quota(k);
                q = &c->quotas[k.k->p.inode];

                mutex_lock(&q->lock);
                mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
                if (!mq) {
                        mutex_unlock(&q->lock);
                        return -ENOMEM;
                }

                for (i = 0; i < Q_COUNTERS; i++) {
                        mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
                        mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
                }

                if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
                        mq->c[Q_SPC].timer = qdq->d_spc_timer;
                if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
                        mq->c[Q_SPC].warns = qdq->d_spc_warns;
                if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
                        mq->c[Q_INO].timer = qdq->d_ino_timer;
                if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
                        mq->c[Q_INO].warns = qdq->d_ino_warns;

                mutex_unlock(&q->lock);
        }

        return 0;
}

void bch2_fs_quota_exit(struct bch_fs *c)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
                genradix_free(&c->quotas[i].table);
}

void bch2_fs_quota_init(struct bch_fs *c)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
                mutex_init(&c->quotas[i].lock);
}

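/*
 * Return the superblock quota section, creating it if necessary; a
 * newly created section defaults every grace period to one week
 * (7 * 24 * 60 * 60 seconds).
 */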
static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
{
        struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);

        if (sb_quota)
                return sb_quota;

        sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
        if (sb_quota) {
                unsigned qtype, qc;

                for (qtype = 0; qtype < QTYP_NR; qtype++)
                        for (qc = 0; qc < Q_COUNTERS; qc++)
                                sb_quota->q[qtype].c[qc].timelimit =
                                        cpu_to_le32(7 * 24 * 60 * 60);
        }

        return sb_quota;
}

static void bch2_sb_quota_read(struct bch_fs *c)
{
        struct bch_sb_field_quota *sb_quota;
        unsigned i, j;

        sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
        if (!sb_quota)
                return;

        for (i = 0; i < QTYP_NR; i++) {
                struct bch_memquota_type *q = &c->quotas[i];

                for (j = 0; j < Q_COUNTERS; j++) {
                        q->limits[j].timelimit =
                                le32_to_cpu(sb_quota->q[i].c[j].timelimit);
                        q->limits[j].warnlimit =
                                le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
                }
        }
}

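/*
 * Rebuild in-memory quota usage from an inode key during
 * bch2_fs_quota_read(): account the inode as seen from its snapshot
 * tree's master subvolume, skip inodes that have been deleted there,
 * then advance past the remaining snapshot versions of the same inode
 * so each inode number is counted once.
 */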
static int bch2_fs_quota_read_inode(struct btree_trans *trans,
                                    struct btree_iter *iter,
                                    struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bch_inode_unpacked u;
        struct bch_snapshot_tree s_t;
        int ret;

        ret = bch2_snapshot_tree_lookup(trans,
                        bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
        bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
                                "%s: snapshot tree %u not found", __func__,
                                snapshot_t(c, k.k->p.snapshot)->tree);
        if (ret)
                return ret;

        if (!s_t.master_subvol)
                goto advance;

        ret = bch2_inode_find_by_inum_nowarn_trans(trans,
                                (subvol_inum) {
                                        le32_to_cpu(s_t.master_subvol),
                                        k.k->p.offset,
                                }, &u);
        /*
         * Inode might be deleted in this snapshot - the easiest way to handle
         * that is to just skip it here:
         */
        if (bch2_err_matches(ret, ENOENT))
                goto advance;

        if (ret)
                return ret;

        bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
                        KEY_TYPE_QUOTA_NOCHECK);
        bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
                        KEY_TYPE_QUOTA_NOCHECK);
advance:
        bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
        return 0;
}

int bch2_fs_quota_read(struct bch_fs *c)
{
        mutex_lock(&c->sb_lock);
        struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
        if (!sb_quota) {
                mutex_unlock(&c->sb_lock);
                return -BCH_ERR_ENOSPC_sb_quota;
        }

        bch2_sb_quota_read(c);
        mutex_unlock(&c->sb_lock);

        int ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
                                   BTREE_ITER_PREFETCH, k,
                        __bch2_quota_set(c, k, NULL)) ?:
                for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
                                   BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
                        bch2_fs_quota_read_inode(trans, &iter, k)));
        bch_err_fn(c, ret);
        return ret;
}

/* Enable/disable/delete quotas for an entire filesystem: */

static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
{
        struct bch_fs *c = sb->s_fs_info;
        struct bch_sb_field_quota *sb_quota;
        int ret = 0;

        if (sb->s_flags & SB_RDONLY)
                return -EROFS;

        /* Accounting must be enabled at mount time: */
        if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
                return -EINVAL;

        /* Can't enable enforcement without accounting: */
        if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
                return -EINVAL;

        if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
                return -EINVAL;

        if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
                return -EINVAL;

        mutex_lock(&c->sb_lock);
        sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
        if (!sb_quota) {
                ret = -BCH_ERR_ENOSPC_sb_quota;
                goto unlock;
        }

        if (uflags & FS_QUOTA_UDQ_ENFD)
                SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

        if (uflags & FS_QUOTA_GDQ_ENFD)
                SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

        if (uflags & FS_QUOTA_PDQ_ENFD)
                SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

        bch2_write_super(c);
unlock:
        mutex_unlock(&c->sb_lock);

        return bch2_err_class(ret);
}

static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
{
        struct bch_fs *c = sb->s_fs_info;

        if (sb->s_flags & SB_RDONLY)
                return -EROFS;

        mutex_lock(&c->sb_lock);
        if (uflags & FS_QUOTA_UDQ_ENFD)
                SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

        if (uflags & FS_QUOTA_GDQ_ENFD)
                SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

        if (uflags & FS_QUOTA_PDQ_ENFD)
                SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
}

static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
{
        struct bch_fs *c = sb->s_fs_info;
        int ret;

        if (sb->s_flags & SB_RDONLY)
                return -EROFS;

        if (uflags & FS_USER_QUOTA) {
                if (c->opts.usrquota)
                        return -EINVAL;

                ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
                                              POS(QTYP_USR, 0),
                                              POS(QTYP_USR, U64_MAX),
                                              0, NULL);
                if (ret)
                        return ret;
        }

        if (uflags & FS_GROUP_QUOTA) {
                if (c->opts.grpquota)
                        return -EINVAL;

                ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
                                              POS(QTYP_GRP, 0),
                                              POS(QTYP_GRP, U64_MAX),
                                              0, NULL);
                if (ret)
                        return ret;
        }

        if (uflags & FS_PROJ_QUOTA) {
                if (c->opts.prjquota)
                        return -EINVAL;

                ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
                                              POS(QTYP_PRJ, 0),
                                              POS(QTYP_PRJ, U64_MAX),
                                              0, NULL);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
        struct bch_fs *c = sb->s_fs_info;
        unsigned qtypes = enabled_qtypes(c);
        unsigned i;

        memset(state, 0, sizeof(*state));

        for (i = 0; i < QTYP_NR; i++) {
                state->s_state[i].flags |= QCI_SYSFILE;

                if (!(qtypes & (1 << i)))
                        continue;

                state->s_state[i].flags |= QCI_ACCT_ENABLED;

                state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
                state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

                state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
                state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
        }

        return 0;
}

/*
 * Adjust quota timers & warnings
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
                               struct qc_info *info)
{
        struct bch_fs *c = sb->s_fs_info;
        struct bch_sb_field_quota *sb_quota;
        int ret = 0;

        if (0) {
                struct printbuf buf = PRINTBUF;

                qc_info_to_text(&buf, info);
                pr_info("setting:\n%s", buf.buf);
                printbuf_exit(&buf);
        }

        if (sb->s_flags & SB_RDONLY)
                return -EROFS;

        if (type >= QTYP_NR)
                return -EINVAL;

        if (!((1 << type) & enabled_qtypes(c)))
                return -ESRCH;

        if (info->i_fieldmask &
            ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
                return -EINVAL;

        mutex_lock(&c->sb_lock);
        sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
        if (!sb_quota) {
                ret = -BCH_ERR_ENOSPC_sb_quota;
                goto unlock;
        }

        if (info->i_fieldmask & QC_SPC_TIMER)
                sb_quota->q[type].c[Q_SPC].timelimit =
                        cpu_to_le32(info->i_spc_timelimit);

        if (info->i_fieldmask & QC_SPC_WARNS)
                sb_quota->q[type].c[Q_SPC].warnlimit =
                        cpu_to_le32(info->i_spc_warnlimit);

        if (info->i_fieldmask & QC_INO_TIMER)
                sb_quota->q[type].c[Q_INO].timelimit =
                        cpu_to_le32(info->i_ino_timelimit);

        if (info->i_fieldmask & QC_INO_WARNS)
                sb_quota->q[type].c[Q_INO].warnlimit =
                        cpu_to_le32(info->i_ino_warnlimit);

        bch2_sb_quota_read(c);

        bch2_write_super(c);
unlock:
        mutex_unlock(&c->sb_lock);

        return bch2_err_class(ret);
}

/* Get/set individual quotas: */

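/*
 * Translate the in-memory counters into the generic struct qc_dqblk.
 * Space is tracked internally in 512-byte sectors, so space values are
 * converted to bytes with << 9 here (and back with >> 9 when limits
 * are set).
 */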
static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
{
        dst->d_space            = src->c[Q_SPC].v << 9;
        dst->d_spc_hardlimit    = src->c[Q_SPC].hardlimit << 9;
        dst->d_spc_softlimit    = src->c[Q_SPC].softlimit << 9;
        dst->d_spc_timer        = src->c[Q_SPC].timer;
        dst->d_spc_warns        = src->c[Q_SPC].warns;

        dst->d_ino_count        = src->c[Q_INO].v;
        dst->d_ino_hardlimit    = src->c[Q_INO].hardlimit;
        dst->d_ino_softlimit    = src->c[Q_INO].softlimit;
        dst->d_ino_timer        = src->c[Q_INO].timer;
        dst->d_ino_warns        = src->c[Q_INO].warns;
}

static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
                          struct qc_dqblk *qdq)
{
        struct bch_fs *c = sb->s_fs_info;
        struct bch_memquota_type *q = &c->quotas[kqid.type];
        qid_t qid = from_kqid(&init_user_ns, kqid);
        struct bch_memquota *mq;

        memset(qdq, 0, sizeof(*qdq));

        mutex_lock(&q->lock);
        mq = genradix_ptr(&q->table, qid);
        if (mq)
                __bch2_quota_get(qdq, mq);
        mutex_unlock(&q->lock);

        return 0;
}

static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
                               struct qc_dqblk *qdq)
{
        struct bch_fs *c = sb->s_fs_info;
        struct bch_memquota_type *q = &c->quotas[kqid->type];
        qid_t qid = from_kqid(&init_user_ns, *kqid);
        struct genradix_iter iter;
        struct bch_memquota *mq;
        int ret = 0;

        mutex_lock(&q->lock);

        genradix_for_each_from(&q->table, iter, mq, qid)
                if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
                        __bch2_quota_get(qdq, mq);
                        *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
                        goto found;
                }

        ret = -ENOENT;
found:
        mutex_unlock(&q->lock);
        return bch2_err_class(ret);
}

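/*
 * Update the on-disk quota key within a btree transaction: read the
 * existing key (if any), overwrite only the limits selected by
 * qdq->d_fieldmask (byte limits are converted back to sectors), and
 * write the result. The in-memory copy is refreshed by the caller via
 * __bch2_quota_set().
 */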
static int bch2_set_quota_trans(struct btree_trans *trans,
                                struct bkey_i_quota *new_quota,
                                struct qc_dqblk *qdq)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
                               BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
        ret = bkey_err(k);
        if (unlikely(ret))
                return ret;

        if (k.k->type == KEY_TYPE_quota)
                new_quota->v = *bkey_s_c_to_quota(k).v;

        if (qdq->d_fieldmask & QC_SPC_SOFT)
                new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
        if (qdq->d_fieldmask & QC_SPC_HARD)
                new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

        if (qdq->d_fieldmask & QC_INO_SOFT)
                new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
        if (qdq->d_fieldmask & QC_INO_HARD)
                new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

        ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static int bch2_set_quota(struct super_block *sb, struct kqid qid,
                          struct qc_dqblk *qdq)
{
        struct bch_fs *c = sb->s_fs_info;
        struct bkey_i_quota new_quota;
        int ret;

        if (0) {
                struct printbuf buf = PRINTBUF;

                qc_dqblk_to_text(&buf, qdq);
                pr_info("setting:\n%s", buf.buf);
                printbuf_exit(&buf);
        }

        if (sb->s_flags & SB_RDONLY)
                return -EROFS;

        bkey_quota_init(&new_quota.k_i);
        new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

        ret = bch2_trans_do(c, NULL, NULL, 0,
                            bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
              __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);

        return bch2_err_class(ret);
}

const struct quotactl_ops bch2_quotactl_operations = {
        .quota_enable           = bch2_quota_enable,
        .quota_disable          = bch2_quota_disable,
        .rm_xquota              = bch2_quota_remove,

        .get_state              = bch2_quota_get_state,
        .set_info               = bch2_quota_set_info,

        .get_dqblk              = bch2_get_quota,
        .get_nextdqblk          = bch2_get_next_quota,
        .set_dqblk              = bch2_set_quota,
};

#endif /* CONFIG_BCACHEFS_QUOTA */