/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include "ulist.h"
#include "delayed-ref.h"
#include "misc.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup splits into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but that consumes extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup will update its numbers, based on the dirty extents
 *    traced in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */
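
/*
 * A rough, illustrative mapping of the three phases onto the API declared
 * below.  This is only a sketch: real callers invoke these helpers from
 * different code paths, and error handling and locking are omitted.
 *
 *	// 1) Reserve: before dirtying data, reserve qgroup space.
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// 2) Trace: mark the extent dirty so it gets accounted later.
 *	ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
 *
 *	// Once the data reaches disk the reservation is released; on error
 *	// paths it would be freed back instead.
 *	ret = btrfs_qgroup_release_data(inode, start, len);
 *
 *	// 3) Account: usually done for all traced extents at transaction
 *	// commit (or during rescan).
 *	ret = btrfs_qgroup_account_extents(trans);
 */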

/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap subtrees of the subvolume and reloc trees.
 * In theory, we need to trace all subtree blocks of both the subvolume and
 * reloc trees, since their owner has changed during such a swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structures, such a swap won't cause a
 * qgroup number change.
 *
 * But there is a race window between subtree swap and transaction commit.
 * During that window, if we increase/decrease the tree level or merge/split
 * tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root blocks that get swapped.
 *
 *    During subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA    OB
 *        /  |     |  \                      /  |    |  \
 *      NC  ND     OE  OF                  OC  OD   OE  OF
 *
 *    In this case, NA and OA are going to be swapped, so record (NA, OA)
 *    into subvolume tree X.
 *
 * 2) After subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA    OB
 *        /  |     |  \                      /  |    |  \
 *      OC  OD     OE  OF                  NC  ND   OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't match any record, nothing happens.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA.
 *     This results in 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, the qgroup numbers will
 *     still be correct.
 *     Finally NA's record gets removed from X's swapped_blocks.
 *
 * 4) Transaction commit
 *    Any record left in X's swapped_blocks gets removed, since there was no
 *    modification to the swapped subtrees, and there is no need to trigger a
 *    heavy qgroup subtree rescan for them.
 */
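
/*
 * The workflow above maps onto the swapped-blocks helpers declared near the
 * end of this header.  A simplified, illustrative sketch (real callers live
 * in the relocation and COW paths, with error handling omitted):
 *
 *	// Step 1: while swapping subtrees during balance, record the pair.
 *	btrfs_qgroup_add_swapped_blocks(trans, subvol_root, bg,
 *					subvol_parent, subvol_slot,
 *					reloc_parent, reloc_slot,
 *					last_snapshot);
 *
 *	// Step 3: when a recorded block gets COWed, trace both subtrees.
 *	btrfs_qgroup_trace_subtree_after_cow(trans, root, eb);
 *
 *	// Step 4: at transaction commit, drop any records left over.
 *	btrfs_qgroup_clean_swapped_blocks(root);
 */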

/*
 * These flags share the flags field of the btrfs_qgroup_status_item with the
 * persisted flags defined in btrfs_tree.h.
 *
 * To minimize the chance of collision with new persisted status flags, these
 * count backwards from the MSB.
 */
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN		(1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING		(1ULL << 62)

/*
 * Record a dirty extent, and inform the qgroup code to update quota on it.
 * TODO: Use a kmem cache to allocate it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;

	/*
	 * For freeing qgroup reserved data space.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * They will then be used to free the reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needing to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};

struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Because qgroup numbers are only updated at transaction commit time,
 *	reserved metadata space must be kept until the transaction commits.
 *	Any metadata reservation used in btrfs_start_transaction() should be
 *	of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and btrfs_join_transaction() is called later to get a
 *	trans handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS (see the sketch after the enum below).
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};
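
/*
 * Illustrative sketch of the META_PREALLOC -> META_PERTRANS flow described
 * above, using the helpers declared later in this header (error handling is
 * omitted and the byte counts are placeholders):
 *
 *	// Reserve before we have a transaction handle.
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nbytes, true, false);
 *
 *	trans = btrfs_join_transaction(root);
 *
 *	// Convert the part that is now backed by the transaction ...
 *	btrfs_qgroup_convert_reserved_meta(root, used_bytes);
 *	// ... and free whatever prealloc reservation was not needed.
 *	btrfs_qgroup_free_meta_prealloc(root, nbytes - used_bytes);
 */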

/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type has different reservation behavior.
 * E.g., data follows its io_tree flag modifications, while
 * *currently* meta is just reserve-and-clear during a transaction.
 *
 * TODO: Add a new type for reservations which can survive a transaction
 * commit.  The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */

	/*
	 * For qgroup iteration usage.
	 *
	 * The iteration list should always be empty until qgroup_iterator_add()
	 * is called, and should be reset to empty after the iteration is
	 * finished.
	 */
	struct list_head iterator;

	/*
	 * For nested iterator usage.
	 *
	 * Here we support at most one level of nested iterator calls like:
	 *
	 *	LIST_HEAD(all_qgroups);
	 *	{
	 *		LIST_HEAD(local_qgroups);
	 *		qgroup_iterator_add(local_qgroups, qg);
	 *		qgroup_iterator_nested_add(all_qgroups, qg);
	 *		do_some_work(local_qgroups);
	 *		qgroup_iterator_clean(local_qgroups);
	 *	}
	 *	do_some_work(all_qgroups);
	 *	qgroup_iterator_nested_clean(all_qgroups);
	 */
	struct list_head nested_iterator;
	struct rb_node node;	/* tree of qgroups */

	/*
	 * Temporary variables for accounting operations.
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * Sysfs kobject
	 */
	struct kobject kobj;
};

struct btrfs_squota_delta {
	/* The fstree root this delta counts against. */
	u64 root;
	/* The number of bytes in the extent being counted. */
	u64 num_bytes;
	/* The number of bytes reserved for this extent. */
	u64 rsv_bytes;
	/* The generation the extent was created in. */
	u64 generation;
	/* Whether we are using or freeing the extent. */
	bool is_inc;
	/* Whether the extent is data or metadata. */
	bool is_data;
};

static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}
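
/*
 * A qgroupid packs the qgroup level into the high bits (above
 * BTRFS_QGROUP_LEVEL_SHIFT, defined in btrfs_tree.h) and the subvolume id
 * into the low bits, so btrfs_qgroup_subvolid() simply masks the level off.
 * For illustration, a "1/100" qgroup would be built roughly as:
 *
 *	u64 qgroupid = (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 100;
 */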

/*
 * For qgroup event trace points only
 */
enum {
	ENUM_BIT(QGROUP_RESERVE),
	ENUM_BIT(QGROUP_RELEASE),
	ENUM_BIT(QGROUP_FREE),
};

enum btrfs_qgroup_mode {
	BTRFS_QGROUP_MODE_DISABLED,
	BTRFS_QGROUP_MODE_FULL,
	BTRFS_QGROUP_MODE_SIMPLE
};

enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;

int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			      struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len);
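
/*
 * Typical lifetime of a data reservation made with the API above (a
 * simplified sketch under the usual buffered write assumptions; the real
 * callers add more steps and error handling):
 *
 *	// Before dirtying [start, start + len): reserve the range.
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// If the data reaches disk, release the range; the usage is then
 *	// accounted at transaction commit.
 *	btrfs_qgroup_release_data(inode, start, len);
 *
 *	// If the write is aborted instead, return the reservation.
 *	btrfs_qgroup_free_data(inode, reserved, start, len);
 */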
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush);
/* Reserve metadata space for pertrans and prealloc types */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
						     int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PERTRANS,
					   enforce, false);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
						      int num_bytes, bool enforce,
						      bool noflush)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC,
					   enforce, noflush);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed as needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PREALLOC);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
		struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
			      struct btrfs_squota_delta *delta);

#endif