1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Quota code necessary even when VFS quota support is not compiled |
4 | * into the kernel. The interesting stuff is over in dquot.c, here |
5 | * we have symbols for initial quotactl(2) handling, the sysctl(2) |
6 | * variables, etc - things needed even when quota support disabled. |
7 | */ |
8 | |
9 | #include <linux/fs.h> |
10 | #include <linux/namei.h> |
11 | #include <linux/slab.h> |
12 | #include <asm/current.h> |
13 | #include <linux/blkdev.h> |
14 | #include <linux/uaccess.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/security.h> |
17 | #include <linux/syscalls.h> |
18 | #include <linux/capability.h> |
19 | #include <linux/quotaops.h> |
20 | #include <linux/types.h> |
21 | #include <linux/mount.h> |
22 | #include <linux/writeback.h> |
23 | #include <linux/nospec.h> |
24 | #include "compat.h" |
25 | #include "../internal.h" |
26 | |
27 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, |
28 | qid_t id) |
29 | { |
30 | switch (cmd) { |
31 | /* these commands do not require any special privilegues */ |
32 | case Q_GETFMT: |
33 | case Q_SYNC: |
34 | case Q_GETINFO: |
35 | case Q_XGETQSTAT: |
36 | case Q_XGETQSTATV: |
37 | case Q_XQUOTASYNC: |
38 | break; |
39 | /* allow to query information for dquots we "own" */ |
40 | case Q_GETQUOTA: |
41 | case Q_XGETQUOTA: |
42 | if ((type == USRQUOTA && uid_eq(current_euid(), right: make_kuid(current_user_ns(), uid: id))) || |
43 | (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), gid: id)))) |
44 | break; |
45 | fallthrough; |
46 | default: |
47 | if (!capable(CAP_SYS_ADMIN)) |
48 | return -EPERM; |
49 | } |
50 | |
51 | return security_quotactl(cmds: cmd, type, id, sb); |
52 | } |
53 | |
54 | static void quota_sync_one(struct super_block *sb, void *arg) |
55 | { |
56 | int type = *(int *)arg; |
57 | |
58 | if (sb->s_qcop && sb->s_qcop->quota_sync && |
59 | (sb->s_quota_types & (1 << type))) |
60 | sb->s_qcop->quota_sync(sb, type); |
61 | } |
62 | |
63 | static int quota_sync_all(int type) |
64 | { |
65 | int ret; |
66 | |
67 | ret = security_quotactl(Q_SYNC, type, id: 0, NULL); |
68 | if (!ret) |
69 | iterate_supers(quota_sync_one, &type); |
70 | return ret; |
71 | } |
72 | |
73 | unsigned int qtype_enforce_flag(int type) |
74 | { |
75 | switch (type) { |
76 | case USRQUOTA: |
77 | return FS_QUOTA_UDQ_ENFD; |
78 | case GRPQUOTA: |
79 | return FS_QUOTA_GDQ_ENFD; |
80 | case PRJQUOTA: |
81 | return FS_QUOTA_PDQ_ENFD; |
82 | } |
83 | return 0; |
84 | } |
85 | |
86 | static int quota_quotaon(struct super_block *sb, int type, qid_t id, |
87 | const struct path *path) |
88 | { |
89 | if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable) |
90 | return -ENOSYS; |
91 | if (sb->s_qcop->quota_enable) |
92 | return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type)); |
93 | if (IS_ERR(ptr: path)) |
94 | return PTR_ERR(ptr: path); |
95 | return sb->s_qcop->quota_on(sb, type, id, path); |
96 | } |
97 | |
98 | static int quota_quotaoff(struct super_block *sb, int type) |
99 | { |
100 | if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable) |
101 | return -ENOSYS; |
102 | if (sb->s_qcop->quota_disable) |
103 | return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type)); |
104 | return sb->s_qcop->quota_off(sb, type); |
105 | } |
106 | |
107 | static int quota_getfmt(struct super_block *sb, int type, void __user *addr) |
108 | { |
109 | __u32 fmt; |
110 | |
111 | if (!sb_has_quota_active(sb, type)) |
112 | return -ESRCH; |
113 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; |
114 | if (copy_to_user(to: addr, from: &fmt, n: sizeof(fmt))) |
115 | return -EFAULT; |
116 | return 0; |
117 | } |
118 | |
119 | static int quota_getinfo(struct super_block *sb, int type, void __user *addr) |
120 | { |
121 | struct qc_state state; |
122 | struct qc_type_state *tstate; |
123 | struct if_dqinfo uinfo; |
124 | int ret; |
125 | |
126 | if (!sb->s_qcop->get_state) |
127 | return -ENOSYS; |
128 | ret = sb->s_qcop->get_state(sb, &state); |
129 | if (ret) |
130 | return ret; |
131 | tstate = state.s_state + type; |
132 | if (!(tstate->flags & QCI_ACCT_ENABLED)) |
133 | return -ESRCH; |
134 | memset(&uinfo, 0, sizeof(uinfo)); |
135 | uinfo.dqi_bgrace = tstate->spc_timelimit; |
136 | uinfo.dqi_igrace = tstate->ino_timelimit; |
137 | if (tstate->flags & QCI_SYSFILE) |
138 | uinfo.dqi_flags |= DQF_SYS_FILE; |
139 | if (tstate->flags & QCI_ROOT_SQUASH) |
140 | uinfo.dqi_flags |= DQF_ROOT_SQUASH; |
141 | uinfo.dqi_valid = IIF_ALL; |
142 | if (copy_to_user(to: addr, from: &uinfo, n: sizeof(uinfo))) |
143 | return -EFAULT; |
144 | return 0; |
145 | } |
146 | |
147 | static int quota_setinfo(struct super_block *sb, int type, void __user *addr) |
148 | { |
149 | struct if_dqinfo info; |
150 | struct qc_info qinfo; |
151 | |
152 | if (copy_from_user(to: &info, from: addr, n: sizeof(info))) |
153 | return -EFAULT; |
154 | if (!sb->s_qcop->set_info) |
155 | return -ENOSYS; |
156 | if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE)) |
157 | return -EINVAL; |
158 | memset(&qinfo, 0, sizeof(qinfo)); |
159 | if (info.dqi_valid & IIF_FLAGS) { |
160 | if (info.dqi_flags & ~DQF_SETINFO_MASK) |
161 | return -EINVAL; |
162 | if (info.dqi_flags & DQF_ROOT_SQUASH) |
163 | qinfo.i_flags |= QCI_ROOT_SQUASH; |
164 | qinfo.i_fieldmask |= QC_FLAGS; |
165 | } |
166 | if (info.dqi_valid & IIF_BGRACE) { |
167 | qinfo.i_spc_timelimit = info.dqi_bgrace; |
168 | qinfo.i_fieldmask |= QC_SPC_TIMER; |
169 | } |
170 | if (info.dqi_valid & IIF_IGRACE) { |
171 | qinfo.i_ino_timelimit = info.dqi_igrace; |
172 | qinfo.i_fieldmask |= QC_INO_TIMER; |
173 | } |
174 | return sb->s_qcop->set_info(sb, type, &qinfo); |
175 | } |
176 | |
177 | static inline qsize_t qbtos(qsize_t blocks) |
178 | { |
179 | return blocks << QIF_DQBLKSIZE_BITS; |
180 | } |
181 | |
182 | static inline qsize_t stoqb(qsize_t space) |
183 | { |
184 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; |
185 | } |
186 | |
187 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) |
188 | { |
189 | memset(dst, 0, sizeof(*dst)); |
190 | dst->dqb_bhardlimit = stoqb(space: src->d_spc_hardlimit); |
191 | dst->dqb_bsoftlimit = stoqb(space: src->d_spc_softlimit); |
192 | dst->dqb_curspace = src->d_space; |
193 | dst->dqb_ihardlimit = src->d_ino_hardlimit; |
194 | dst->dqb_isoftlimit = src->d_ino_softlimit; |
195 | dst->dqb_curinodes = src->d_ino_count; |
196 | dst->dqb_btime = src->d_spc_timer; |
197 | dst->dqb_itime = src->d_ino_timer; |
198 | dst->dqb_valid = QIF_ALL; |
199 | } |
200 | |
201 | static int quota_getquota(struct super_block *sb, int type, qid_t id, |
202 | void __user *addr) |
203 | { |
204 | struct kqid qid; |
205 | struct qc_dqblk fdq; |
206 | struct if_dqblk idq; |
207 | int ret; |
208 | |
209 | if (!sb->s_qcop->get_dqblk) |
210 | return -ENOSYS; |
211 | qid = make_kqid(current_user_ns(), type, qid: id); |
212 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
213 | return -EINVAL; |
214 | ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); |
215 | if (ret) |
216 | return ret; |
217 | copy_to_if_dqblk(dst: &idq, src: &fdq); |
218 | |
219 | if (compat_need_64bit_alignment_fixup()) { |
220 | struct compat_if_dqblk __user *compat_dqblk = addr; |
221 | |
222 | if (copy_to_user(to: compat_dqblk, from: &idq, n: sizeof(*compat_dqblk))) |
223 | return -EFAULT; |
224 | if (put_user(idq.dqb_valid, &compat_dqblk->dqb_valid)) |
225 | return -EFAULT; |
226 | } else { |
227 | if (copy_to_user(to: addr, from: &idq, n: sizeof(idq))) |
228 | return -EFAULT; |
229 | } |
230 | return 0; |
231 | } |
232 | |
233 | /* |
234 | * Return quota for next active quota >= this id, if any exists, |
235 | * otherwise return -ENOENT via ->get_nextdqblk |
236 | */ |
237 | static int quota_getnextquota(struct super_block *sb, int type, qid_t id, |
238 | void __user *addr) |
239 | { |
240 | struct kqid qid; |
241 | struct qc_dqblk fdq; |
242 | struct if_nextdqblk idq; |
243 | int ret; |
244 | |
245 | if (!sb->s_qcop->get_nextdqblk) |
246 | return -ENOSYS; |
247 | qid = make_kqid(current_user_ns(), type, qid: id); |
248 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
249 | return -EINVAL; |
250 | ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq); |
251 | if (ret) |
252 | return ret; |
253 | /* struct if_nextdqblk is a superset of struct if_dqblk */ |
254 | copy_to_if_dqblk(dst: (struct if_dqblk *)&idq, src: &fdq); |
255 | idq.dqb_id = from_kqid(current_user_ns(), qid); |
256 | if (copy_to_user(to: addr, from: &idq, n: sizeof(idq))) |
257 | return -EFAULT; |
258 | return 0; |
259 | } |
260 | |
261 | static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) |
262 | { |
263 | dst->d_spc_hardlimit = qbtos(blocks: src->dqb_bhardlimit); |
264 | dst->d_spc_softlimit = qbtos(blocks: src->dqb_bsoftlimit); |
265 | dst->d_space = src->dqb_curspace; |
266 | dst->d_ino_hardlimit = src->dqb_ihardlimit; |
267 | dst->d_ino_softlimit = src->dqb_isoftlimit; |
268 | dst->d_ino_count = src->dqb_curinodes; |
269 | dst->d_spc_timer = src->dqb_btime; |
270 | dst->d_ino_timer = src->dqb_itime; |
271 | |
272 | dst->d_fieldmask = 0; |
273 | if (src->dqb_valid & QIF_BLIMITS) |
274 | dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; |
275 | if (src->dqb_valid & QIF_SPACE) |
276 | dst->d_fieldmask |= QC_SPACE; |
277 | if (src->dqb_valid & QIF_ILIMITS) |
278 | dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; |
279 | if (src->dqb_valid & QIF_INODES) |
280 | dst->d_fieldmask |= QC_INO_COUNT; |
281 | if (src->dqb_valid & QIF_BTIME) |
282 | dst->d_fieldmask |= QC_SPC_TIMER; |
283 | if (src->dqb_valid & QIF_ITIME) |
284 | dst->d_fieldmask |= QC_INO_TIMER; |
285 | } |
286 | |
287 | static int quota_setquota(struct super_block *sb, int type, qid_t id, |
288 | void __user *addr) |
289 | { |
290 | struct qc_dqblk fdq; |
291 | struct if_dqblk idq; |
292 | struct kqid qid; |
293 | |
294 | if (compat_need_64bit_alignment_fixup()) { |
295 | struct compat_if_dqblk __user *compat_dqblk = addr; |
296 | |
297 | if (copy_from_user(to: &idq, from: compat_dqblk, n: sizeof(*compat_dqblk)) || |
298 | get_user(idq.dqb_valid, &compat_dqblk->dqb_valid)) |
299 | return -EFAULT; |
300 | } else { |
301 | if (copy_from_user(to: &idq, from: addr, n: sizeof(idq))) |
302 | return -EFAULT; |
303 | } |
304 | if (!sb->s_qcop->set_dqblk) |
305 | return -ENOSYS; |
306 | qid = make_kqid(current_user_ns(), type, qid: id); |
307 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
308 | return -EINVAL; |
309 | copy_from_if_dqblk(dst: &fdq, src: &idq); |
310 | return sb->s_qcop->set_dqblk(sb, qid, &fdq); |
311 | } |
312 | |
313 | static int quota_enable(struct super_block *sb, void __user *addr) |
314 | { |
315 | __u32 flags; |
316 | |
317 | if (copy_from_user(to: &flags, from: addr, n: sizeof(flags))) |
318 | return -EFAULT; |
319 | if (!sb->s_qcop->quota_enable) |
320 | return -ENOSYS; |
321 | return sb->s_qcop->quota_enable(sb, flags); |
322 | } |
323 | |
324 | static int quota_disable(struct super_block *sb, void __user *addr) |
325 | { |
326 | __u32 flags; |
327 | |
328 | if (copy_from_user(to: &flags, from: addr, n: sizeof(flags))) |
329 | return -EFAULT; |
330 | if (!sb->s_qcop->quota_disable) |
331 | return -ENOSYS; |
332 | return sb->s_qcop->quota_disable(sb, flags); |
333 | } |
334 | |
335 | static int quota_state_to_flags(struct qc_state *state) |
336 | { |
337 | int flags = 0; |
338 | |
339 | if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) |
340 | flags |= FS_QUOTA_UDQ_ACCT; |
341 | if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED) |
342 | flags |= FS_QUOTA_UDQ_ENFD; |
343 | if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) |
344 | flags |= FS_QUOTA_GDQ_ACCT; |
345 | if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED) |
346 | flags |= FS_QUOTA_GDQ_ENFD; |
347 | if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) |
348 | flags |= FS_QUOTA_PDQ_ACCT; |
349 | if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED) |
350 | flags |= FS_QUOTA_PDQ_ENFD; |
351 | return flags; |
352 | } |
353 | |
354 | static int quota_getstate(struct super_block *sb, int type, |
355 | struct fs_quota_stat *fqs) |
356 | { |
357 | struct qc_state state; |
358 | int ret; |
359 | |
360 | memset(&state, 0, sizeof (struct qc_state)); |
361 | ret = sb->s_qcop->get_state(sb, &state); |
362 | if (ret < 0) |
363 | return ret; |
364 | |
365 | memset(fqs, 0, sizeof(*fqs)); |
366 | fqs->qs_version = FS_QSTAT_VERSION; |
367 | fqs->qs_flags = quota_state_to_flags(state: &state); |
368 | /* No quota enabled? */ |
369 | if (!fqs->qs_flags) |
370 | return -ENOSYS; |
371 | fqs->qs_incoredqs = state.s_incoredqs; |
372 | |
373 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
374 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
375 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
376 | fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; |
377 | fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; |
378 | |
379 | /* Inodes may be allocated even if inactive; copy out if present */ |
380 | if (state.s_state[USRQUOTA].ino) { |
381 | fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; |
382 | fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; |
383 | fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; |
384 | } |
385 | if (state.s_state[GRPQUOTA].ino) { |
386 | fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; |
387 | fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; |
388 | fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; |
389 | } |
390 | if (state.s_state[PRJQUOTA].ino) { |
391 | /* |
392 | * Q_XGETQSTAT doesn't have room for both group and project |
393 | * quotas. So, allow the project quota values to be copied out |
394 | * only if there is no group quota information available. |
395 | */ |
396 | if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) { |
397 | fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino; |
398 | fqs->qs_gquota.qfs_nblks = |
399 | state.s_state[PRJQUOTA].blocks; |
400 | fqs->qs_gquota.qfs_nextents = |
401 | state.s_state[PRJQUOTA].nextents; |
402 | } |
403 | } |
404 | return 0; |
405 | } |
406 | |
407 | static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user *to, |
408 | struct fs_qfilestat *from) |
409 | { |
410 | if (copy_to_user(to, from, n: sizeof(*to)) || |
411 | put_user(from->qfs_nextents, &to->qfs_nextents)) |
412 | return -EFAULT; |
413 | return 0; |
414 | } |
415 | |
416 | static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user *to, |
417 | struct fs_quota_stat *from) |
418 | { |
419 | if (put_user(from->qs_version, &to->qs_version) || |
420 | put_user(from->qs_flags, &to->qs_flags) || |
421 | put_user(from->qs_pad, &to->qs_pad) || |
422 | compat_copy_fs_qfilestat(to: &to->qs_uquota, from: &from->qs_uquota) || |
423 | compat_copy_fs_qfilestat(to: &to->qs_gquota, from: &from->qs_gquota) || |
424 | put_user(from->qs_incoredqs, &to->qs_incoredqs) || |
425 | put_user(from->qs_btimelimit, &to->qs_btimelimit) || |
426 | put_user(from->qs_itimelimit, &to->qs_itimelimit) || |
427 | put_user(from->qs_rtbtimelimit, &to->qs_rtbtimelimit) || |
428 | put_user(from->qs_bwarnlimit, &to->qs_bwarnlimit) || |
429 | put_user(from->qs_iwarnlimit, &to->qs_iwarnlimit)) |
430 | return -EFAULT; |
431 | return 0; |
432 | } |
433 | |
434 | static int quota_getxstate(struct super_block *sb, int type, void __user *addr) |
435 | { |
436 | struct fs_quota_stat fqs; |
437 | int ret; |
438 | |
439 | if (!sb->s_qcop->get_state) |
440 | return -ENOSYS; |
441 | ret = quota_getstate(sb, type, fqs: &fqs); |
442 | if (ret) |
443 | return ret; |
444 | |
445 | if (compat_need_64bit_alignment_fixup()) |
446 | return compat_copy_fs_quota_stat(to: addr, from: &fqs); |
447 | if (copy_to_user(to: addr, from: &fqs, n: sizeof(fqs))) |
448 | return -EFAULT; |
449 | return 0; |
450 | } |
451 | |
452 | static int quota_getstatev(struct super_block *sb, int type, |
453 | struct fs_quota_statv *fqs) |
454 | { |
455 | struct qc_state state; |
456 | int ret; |
457 | |
458 | memset(&state, 0, sizeof (struct qc_state)); |
459 | ret = sb->s_qcop->get_state(sb, &state); |
460 | if (ret < 0) |
461 | return ret; |
462 | |
463 | memset(fqs, 0, sizeof(*fqs)); |
464 | fqs->qs_version = FS_QSTAT_VERSION; |
465 | fqs->qs_flags = quota_state_to_flags(state: &state); |
466 | /* No quota enabled? */ |
467 | if (!fqs->qs_flags) |
468 | return -ENOSYS; |
469 | fqs->qs_incoredqs = state.s_incoredqs; |
470 | |
471 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
472 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
473 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
474 | fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit; |
475 | fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit; |
476 | fqs->qs_rtbwarnlimit = state.s_state[type].rt_spc_warnlimit; |
477 | |
478 | /* Inodes may be allocated even if inactive; copy out if present */ |
479 | if (state.s_state[USRQUOTA].ino) { |
480 | fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino; |
481 | fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks; |
482 | fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents; |
483 | } |
484 | if (state.s_state[GRPQUOTA].ino) { |
485 | fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino; |
486 | fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks; |
487 | fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents; |
488 | } |
489 | if (state.s_state[PRJQUOTA].ino) { |
490 | fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino; |
491 | fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks; |
492 | fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents; |
493 | } |
494 | return 0; |
495 | } |
496 | |
497 | static int quota_getxstatev(struct super_block *sb, int type, void __user *addr) |
498 | { |
499 | struct fs_quota_statv fqs; |
500 | int ret; |
501 | |
502 | if (!sb->s_qcop->get_state) |
503 | return -ENOSYS; |
504 | |
505 | memset(&fqs, 0, sizeof(fqs)); |
506 | if (copy_from_user(to: &fqs, from: addr, n: 1)) /* Just read qs_version */ |
507 | return -EFAULT; |
508 | |
509 | /* If this kernel doesn't support user specified version, fail */ |
510 | switch (fqs.qs_version) { |
511 | case FS_QSTATV_VERSION1: |
512 | break; |
513 | default: |
514 | return -EINVAL; |
515 | } |
516 | ret = quota_getstatev(sb, type, fqs: &fqs); |
517 | if (!ret && copy_to_user(to: addr, from: &fqs, n: sizeof(fqs))) |
518 | return -EFAULT; |
519 | return ret; |
520 | } |
521 | |
522 | /* |
523 | * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them |
524 | * out of there as xfsprogs rely on definitions being in that header file. So |
525 | * just define same functions here for quota purposes. |
526 | */ |
527 | #define XFS_BB_SHIFT 9 |
528 | |
529 | static inline u64 quota_bbtob(u64 blocks) |
530 | { |
531 | return blocks << XFS_BB_SHIFT; |
532 | } |
533 | |
534 | static inline u64 quota_btobb(u64 bytes) |
535 | { |
536 | return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; |
537 | } |
538 | |
539 | static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d, |
540 | __s32 timer, __s8 timer_hi) |
541 | { |
542 | if (d->d_fieldmask & FS_DQ_BIGTIME) |
543 | return (u32)timer | (s64)timer_hi << 32; |
544 | return timer; |
545 | } |
546 | |
547 | static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) |
548 | { |
549 | dst->d_spc_hardlimit = quota_bbtob(blocks: src->d_blk_hardlimit); |
550 | dst->d_spc_softlimit = quota_bbtob(blocks: src->d_blk_softlimit); |
551 | dst->d_ino_hardlimit = src->d_ino_hardlimit; |
552 | dst->d_ino_softlimit = src->d_ino_softlimit; |
553 | dst->d_space = quota_bbtob(blocks: src->d_bcount); |
554 | dst->d_ino_count = src->d_icount; |
555 | dst->d_ino_timer = copy_from_xfs_dqblk_ts(d: src, timer: src->d_itimer, |
556 | timer_hi: src->d_itimer_hi); |
557 | dst->d_spc_timer = copy_from_xfs_dqblk_ts(d: src, timer: src->d_btimer, |
558 | timer_hi: src->d_btimer_hi); |
559 | dst->d_ino_warns = src->d_iwarns; |
560 | dst->d_spc_warns = src->d_bwarns; |
561 | dst->d_rt_spc_hardlimit = quota_bbtob(blocks: src->d_rtb_hardlimit); |
562 | dst->d_rt_spc_softlimit = quota_bbtob(blocks: src->d_rtb_softlimit); |
563 | dst->d_rt_space = quota_bbtob(blocks: src->d_rtbcount); |
564 | dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(d: src, timer: src->d_rtbtimer, |
565 | timer_hi: src->d_rtbtimer_hi); |
566 | dst->d_rt_spc_warns = src->d_rtbwarns; |
567 | dst->d_fieldmask = 0; |
568 | if (src->d_fieldmask & FS_DQ_ISOFT) |
569 | dst->d_fieldmask |= QC_INO_SOFT; |
570 | if (src->d_fieldmask & FS_DQ_IHARD) |
571 | dst->d_fieldmask |= QC_INO_HARD; |
572 | if (src->d_fieldmask & FS_DQ_BSOFT) |
573 | dst->d_fieldmask |= QC_SPC_SOFT; |
574 | if (src->d_fieldmask & FS_DQ_BHARD) |
575 | dst->d_fieldmask |= QC_SPC_HARD; |
576 | if (src->d_fieldmask & FS_DQ_RTBSOFT) |
577 | dst->d_fieldmask |= QC_RT_SPC_SOFT; |
578 | if (src->d_fieldmask & FS_DQ_RTBHARD) |
579 | dst->d_fieldmask |= QC_RT_SPC_HARD; |
580 | if (src->d_fieldmask & FS_DQ_BTIMER) |
581 | dst->d_fieldmask |= QC_SPC_TIMER; |
582 | if (src->d_fieldmask & FS_DQ_ITIMER) |
583 | dst->d_fieldmask |= QC_INO_TIMER; |
584 | if (src->d_fieldmask & FS_DQ_RTBTIMER) |
585 | dst->d_fieldmask |= QC_RT_SPC_TIMER; |
586 | if (src->d_fieldmask & FS_DQ_BWARNS) |
587 | dst->d_fieldmask |= QC_SPC_WARNS; |
588 | if (src->d_fieldmask & FS_DQ_IWARNS) |
589 | dst->d_fieldmask |= QC_INO_WARNS; |
590 | if (src->d_fieldmask & FS_DQ_RTBWARNS) |
591 | dst->d_fieldmask |= QC_RT_SPC_WARNS; |
592 | if (src->d_fieldmask & FS_DQ_BCOUNT) |
593 | dst->d_fieldmask |= QC_SPACE; |
594 | if (src->d_fieldmask & FS_DQ_ICOUNT) |
595 | dst->d_fieldmask |= QC_INO_COUNT; |
596 | if (src->d_fieldmask & FS_DQ_RTBCOUNT) |
597 | dst->d_fieldmask |= QC_RT_SPACE; |
598 | } |
599 | |
600 | static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst, |
601 | struct fs_disk_quota *src) |
602 | { |
603 | memset(dst, 0, sizeof(*dst)); |
604 | dst->i_spc_timelimit = src->d_btimer; |
605 | dst->i_ino_timelimit = src->d_itimer; |
606 | dst->i_rt_spc_timelimit = src->d_rtbtimer; |
607 | dst->i_ino_warnlimit = src->d_iwarns; |
608 | dst->i_spc_warnlimit = src->d_bwarns; |
609 | dst->i_rt_spc_warnlimit = src->d_rtbwarns; |
610 | if (src->d_fieldmask & FS_DQ_BWARNS) |
611 | dst->i_fieldmask |= QC_SPC_WARNS; |
612 | if (src->d_fieldmask & FS_DQ_IWARNS) |
613 | dst->i_fieldmask |= QC_INO_WARNS; |
614 | if (src->d_fieldmask & FS_DQ_RTBWARNS) |
615 | dst->i_fieldmask |= QC_RT_SPC_WARNS; |
616 | if (src->d_fieldmask & FS_DQ_BTIMER) |
617 | dst->i_fieldmask |= QC_SPC_TIMER; |
618 | if (src->d_fieldmask & FS_DQ_ITIMER) |
619 | dst->i_fieldmask |= QC_INO_TIMER; |
620 | if (src->d_fieldmask & FS_DQ_RTBTIMER) |
621 | dst->i_fieldmask |= QC_RT_SPC_TIMER; |
622 | } |
623 | |
624 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, |
625 | void __user *addr) |
626 | { |
627 | struct fs_disk_quota fdq; |
628 | struct qc_dqblk qdq; |
629 | struct kqid qid; |
630 | |
631 | if (copy_from_user(to: &fdq, from: addr, n: sizeof(fdq))) |
632 | return -EFAULT; |
633 | if (!sb->s_qcop->set_dqblk) |
634 | return -ENOSYS; |
635 | qid = make_kqid(current_user_ns(), type, qid: id); |
636 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
637 | return -EINVAL; |
638 | /* Are we actually setting timer / warning limits for all users? */ |
639 | if (from_kqid(to: sb->s_user_ns, qid) == 0 && |
640 | fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) { |
641 | struct qc_info qinfo; |
642 | int ret; |
643 | |
644 | if (!sb->s_qcop->set_info) |
645 | return -EINVAL; |
646 | copy_qcinfo_from_xfs_dqblk(dst: &qinfo, src: &fdq); |
647 | ret = sb->s_qcop->set_info(sb, type, &qinfo); |
648 | if (ret) |
649 | return ret; |
650 | /* These are already done */ |
651 | fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK); |
652 | } |
653 | copy_from_xfs_dqblk(dst: &qdq, src: &fdq); |
654 | return sb->s_qcop->set_dqblk(sb, qid, &qdq); |
655 | } |
656 | |
657 | static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d, |
658 | __s32 *timer_lo, __s8 *timer_hi, s64 timer) |
659 | { |
660 | *timer_lo = timer; |
661 | if (d->d_fieldmask & FS_DQ_BIGTIME) |
662 | *timer_hi = timer >> 32; |
663 | } |
664 | |
665 | static inline bool want_bigtime(s64 timer) |
666 | { |
667 | return timer > S32_MAX || timer < S32_MIN; |
668 | } |
669 | |
670 | static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, |
671 | int type, qid_t id) |
672 | { |
673 | memset(dst, 0, sizeof(*dst)); |
674 | if (want_bigtime(timer: src->d_ino_timer) || want_bigtime(timer: src->d_spc_timer) || |
675 | want_bigtime(timer: src->d_rt_spc_timer)) |
676 | dst->d_fieldmask |= FS_DQ_BIGTIME; |
677 | dst->d_version = FS_DQUOT_VERSION; |
678 | dst->d_id = id; |
679 | if (type == USRQUOTA) |
680 | dst->d_flags = FS_USER_QUOTA; |
681 | else if (type == PRJQUOTA) |
682 | dst->d_flags = FS_PROJ_QUOTA; |
683 | else |
684 | dst->d_flags = FS_GROUP_QUOTA; |
685 | dst->d_blk_hardlimit = quota_btobb(bytes: src->d_spc_hardlimit); |
686 | dst->d_blk_softlimit = quota_btobb(bytes: src->d_spc_softlimit); |
687 | dst->d_ino_hardlimit = src->d_ino_hardlimit; |
688 | dst->d_ino_softlimit = src->d_ino_softlimit; |
689 | dst->d_bcount = quota_btobb(bytes: src->d_space); |
690 | dst->d_icount = src->d_ino_count; |
691 | copy_to_xfs_dqblk_ts(d: dst, timer_lo: &dst->d_itimer, timer_hi: &dst->d_itimer_hi, |
692 | timer: src->d_ino_timer); |
693 | copy_to_xfs_dqblk_ts(d: dst, timer_lo: &dst->d_btimer, timer_hi: &dst->d_btimer_hi, |
694 | timer: src->d_spc_timer); |
695 | dst->d_iwarns = src->d_ino_warns; |
696 | dst->d_bwarns = src->d_spc_warns; |
697 | dst->d_rtb_hardlimit = quota_btobb(bytes: src->d_rt_spc_hardlimit); |
698 | dst->d_rtb_softlimit = quota_btobb(bytes: src->d_rt_spc_softlimit); |
699 | dst->d_rtbcount = quota_btobb(bytes: src->d_rt_space); |
700 | copy_to_xfs_dqblk_ts(d: dst, timer_lo: &dst->d_rtbtimer, timer_hi: &dst->d_rtbtimer_hi, |
701 | timer: src->d_rt_spc_timer); |
702 | dst->d_rtbwarns = src->d_rt_spc_warns; |
703 | } |
704 | |
705 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, |
706 | void __user *addr) |
707 | { |
708 | struct fs_disk_quota fdq; |
709 | struct qc_dqblk qdq; |
710 | struct kqid qid; |
711 | int ret; |
712 | |
713 | if (!sb->s_qcop->get_dqblk) |
714 | return -ENOSYS; |
715 | qid = make_kqid(current_user_ns(), type, qid: id); |
716 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
717 | return -EINVAL; |
718 | ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); |
719 | if (ret) |
720 | return ret; |
721 | copy_to_xfs_dqblk(dst: &fdq, src: &qdq, type, id); |
722 | if (copy_to_user(to: addr, from: &fdq, n: sizeof(fdq))) |
723 | return -EFAULT; |
724 | return ret; |
725 | } |
726 | |
727 | /* |
728 | * Return quota for next active quota >= this id, if any exists, |
729 | * otherwise return -ENOENT via ->get_nextdqblk. |
730 | */ |
731 | static int quota_getnextxquota(struct super_block *sb, int type, qid_t id, |
732 | void __user *addr) |
733 | { |
734 | struct fs_disk_quota fdq; |
735 | struct qc_dqblk qdq; |
736 | struct kqid qid; |
737 | qid_t id_out; |
738 | int ret; |
739 | |
740 | if (!sb->s_qcop->get_nextdqblk) |
741 | return -ENOSYS; |
742 | qid = make_kqid(current_user_ns(), type, qid: id); |
743 | if (!qid_has_mapping(ns: sb->s_user_ns, qid)) |
744 | return -EINVAL; |
745 | ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq); |
746 | if (ret) |
747 | return ret; |
748 | id_out = from_kqid(current_user_ns(), qid); |
749 | copy_to_xfs_dqblk(dst: &fdq, src: &qdq, type, id: id_out); |
750 | if (copy_to_user(to: addr, from: &fdq, n: sizeof(fdq))) |
751 | return -EFAULT; |
752 | return ret; |
753 | } |
754 | |
755 | static int quota_rmxquota(struct super_block *sb, void __user *addr) |
756 | { |
757 | __u32 flags; |
758 | |
759 | if (copy_from_user(to: &flags, from: addr, n: sizeof(flags))) |
760 | return -EFAULT; |
761 | if (!sb->s_qcop->rm_xquota) |
762 | return -ENOSYS; |
763 | return sb->s_qcop->rm_xquota(sb, flags); |
764 | } |
765 | |
/*
 * Copy parameters and call proper function.
 *
 * Central quotactl dispatcher: validates that @sb supports quota of
 * @type at all, runs the permission check, then routes @cmd to the
 * matching quota_* helper.  @path is only meaningful for Q_QUOTAON and
 * may be an ERR_PTR.  Callers hold the appropriate s_umount lock.
 */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, const struct path *path)
{
	int ret;

	/* type was range-checked by the caller; kill speculation anyway */
	type = array_index_nospec(type, MAXQUOTAS);
	/*
	 * Quota not supported on this fs? Check this before s_quota_types
	 * since they needn't be set if quota is not supported at all.
	 */
	if (!sb->s_qcop)
		return -ENOSYS;
	if (!(sb->s_quota_types & (1 << type)))
		return -EINVAL;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type);
	case Q_XQUOTAON:
		return quota_enable(sb, addr);
	case Q_XQUOTAOFF:
		return quota_disable(sb, addr);
	case Q_XQUOTARM:
		return quota_rmxquota(sb, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, type, addr);
	case Q_XGETQSTATV:
		return quota_getxstatev(sb, type, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb_rdonly(sb))
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}
832 | |
833 | /* Return 1 if 'cmd' will block on frozen filesystem */ |
834 | static int quotactl_cmd_write(int cmd) |
835 | { |
836 | /* |
837 | * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access |
838 | * as dquot_acquire() may allocate space for new structure and OCFS2 |
839 | * needs to increment on-disk use count. |
840 | */ |
841 | switch (cmd) { |
842 | case Q_GETFMT: |
843 | case Q_GETINFO: |
844 | case Q_SYNC: |
845 | case Q_XGETQSTAT: |
846 | case Q_XGETQSTATV: |
847 | case Q_XGETQUOTA: |
848 | case Q_XGETNEXTQUOTA: |
849 | case Q_XQUOTASYNC: |
850 | return 0; |
851 | } |
852 | return 1; |
853 | } |
854 | |
855 | /* Return true if quotactl command is manipulating quota on/off state */ |
856 | static bool quotactl_cmd_onoff(int cmd) |
857 | { |
858 | return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) || |
859 | (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF); |
860 | } |
861 | |
862 | /* |
863 | * look up a superblock on which quota ops will be performed |
864 | * - use the name of a block device to find the superblock thereon |
865 | */ |
866 | static struct super_block *quotactl_block(const char __user *special, int cmd) |
867 | { |
868 | #ifdef CONFIG_BLOCK |
869 | struct super_block *sb; |
870 | struct filename *tmp = getname(special); |
871 | bool excl = false, thawed = false; |
872 | int error; |
873 | dev_t dev; |
874 | |
875 | if (IS_ERR(ptr: tmp)) |
876 | return ERR_CAST(ptr: tmp); |
877 | error = lookup_bdev(pathname: tmp->name, dev: &dev); |
878 | putname(name: tmp); |
879 | if (error) |
880 | return ERR_PTR(error); |
881 | |
882 | if (quotactl_cmd_onoff(cmd)) { |
883 | excl = true; |
884 | thawed = true; |
885 | } else if (quotactl_cmd_write(cmd)) { |
886 | thawed = true; |
887 | } |
888 | |
889 | retry: |
890 | sb = user_get_super(dev, excl); |
891 | if (!sb) |
892 | return ERR_PTR(error: -ENODEV); |
893 | if (thawed && sb->s_writers.frozen != SB_UNFROZEN) { |
894 | if (excl) |
895 | up_write(sem: &sb->s_umount); |
896 | else |
897 | up_read(sem: &sb->s_umount); |
898 | /* Wait for sb to unfreeze */ |
899 | sb_start_write(sb); |
900 | sb_end_write(sb); |
901 | put_super(sb); |
902 | goto retry; |
903 | } |
904 | return sb; |
905 | |
906 | #else |
907 | return ERR_PTR(-ENODEV); |
908 | #endif |
909 | } |
910 | |
911 | /* |
912 | * This is the system call interface. This communicates with |
913 | * the user-level programs. Currently this only supports diskquota |
914 | * calls. Maybe we need to add the process quotas etc. in the future, |
915 | * but we probably should use rlimits for that. |
916 | */ |
917 | SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, |
918 | qid_t, id, void __user *, addr) |
919 | { |
920 | uint cmds, type; |
921 | struct super_block *sb = NULL; |
922 | struct path path, *pathp = NULL; |
923 | int ret; |
924 | |
925 | cmds = cmd >> SUBCMDSHIFT; |
926 | type = cmd & SUBCMDMASK; |
927 | |
928 | if (type >= MAXQUOTAS) |
929 | return -EINVAL; |
930 | |
931 | /* |
932 | * As a special case Q_SYNC can be called without a specific device. |
933 | * It will iterate all superblocks that have quota enabled and call |
934 | * the sync action on each of them. |
935 | */ |
936 | if (!special) { |
937 | if (cmds == Q_SYNC) |
938 | return quota_sync_all(type); |
939 | return -ENODEV; |
940 | } |
941 | |
942 | /* |
943 | * Path for quotaon has to be resolved before grabbing superblock |
944 | * because that gets s_umount sem which is also possibly needed by path |
945 | * resolution (think about autofs) and thus deadlocks could arise. |
946 | */ |
947 | if (cmds == Q_QUOTAON) { |
948 | ret = user_path_at(AT_FDCWD, name: addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, path: &path); |
949 | if (ret) |
950 | pathp = ERR_PTR(error: ret); |
951 | else |
952 | pathp = &path; |
953 | } |
954 | |
955 | sb = quotactl_block(special, cmd: cmds); |
956 | if (IS_ERR(ptr: sb)) { |
957 | ret = PTR_ERR(ptr: sb); |
958 | goto out; |
959 | } |
960 | |
961 | ret = do_quotactl(sb, type, cmd: cmds, id, addr, path: pathp); |
962 | |
963 | if (!quotactl_cmd_onoff(cmd: cmds)) |
964 | drop_super(sb); |
965 | else |
966 | drop_super_exclusive(sb); |
967 | out: |
968 | if (pathp && !IS_ERR(ptr: pathp)) |
969 | path_put(pathp); |
970 | return ret; |
971 | } |
972 | |
973 | SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd, |
974 | qid_t, id, void __user *, addr) |
975 | { |
976 | struct super_block *sb; |
977 | unsigned int cmds = cmd >> SUBCMDSHIFT; |
978 | unsigned int type = cmd & SUBCMDMASK; |
979 | struct fd f; |
980 | int ret; |
981 | |
982 | f = fdget_raw(fd); |
983 | if (!f.file) |
984 | return -EBADF; |
985 | |
986 | ret = -EINVAL; |
987 | if (type >= MAXQUOTAS) |
988 | goto out; |
989 | |
990 | if (quotactl_cmd_write(cmd: cmds)) { |
991 | ret = mnt_want_write(mnt: f.file->f_path.mnt); |
992 | if (ret) |
993 | goto out; |
994 | } |
995 | |
996 | sb = f.file->f_path.mnt->mnt_sb; |
997 | if (quotactl_cmd_onoff(cmd: cmds)) |
998 | down_write(sem: &sb->s_umount); |
999 | else |
1000 | down_read(sem: &sb->s_umount); |
1001 | |
1002 | ret = do_quotactl(sb, type, cmd: cmds, id, addr, path: ERR_PTR(error: -EINVAL)); |
1003 | |
1004 | if (quotactl_cmd_onoff(cmd: cmds)) |
1005 | up_write(sem: &sb->s_umount); |
1006 | else |
1007 | up_read(sem: &sb->s_umount); |
1008 | |
1009 | if (quotactl_cmd_write(cmd: cmds)) |
1010 | mnt_drop_write(mnt: f.file->f_path.mnt); |
1011 | out: |
1012 | fdput(fd: f); |
1013 | return ret; |
1014 | } |
1015 | |