/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BSET_H
#define _BCACHEFS_BSET_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "bcachefs.h"
#include "bkey.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "util.h" /* for time_stats */
#include "vstructs.h"

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bkey_invalid() and
 * bkey_deleted().
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from the
 * front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written at
 * different times.
 *
 * There could be many of them on disk, but we never allow there to be more than
 * 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables have in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
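 *
 * For example, in a 7 node tree laid out in an array in breadth-first order -
 * 1 at the root, 2 and 3 its children, 4 through 7 the leaves - to_inorder()
 * maps indices 1..7 to inorder positions 4, 2, 6, 1, 3, 5, 7; roughly
 * speaking, the inorder position selects which BSET_CACHELINE sized chunk of
 * the set the node covers.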
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything above bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do the
 * comparison. But we'd really like our nodes in the auxiliary search tree to be
 * of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they didn't, we
 * flag that node, and when doing lookups we fall back to comparing against the
 * real key. As long as this doesn't happen too often (and it seems to reliably
 * happen a bit less than 1% of the time), we win - even on failures, that key
 * is then more likely to be in cache than if we were doing binary searches all
 * the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per BSET_CACHELINE (256) bytes in the btree node, which
 * means the auxiliary search trees take up about 1.6% as much memory as the
 * btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the i'th range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, only when it would overflow do we go to the trouble of finding the
 * first key in that range of bytes again.
 */
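
/*
 * A minimal sketch of the "floating point" compression described above, on
 * plain u64 keys instead of packed bkeys. The names and the 16 bit mantissa
 * here are made up for the example - the real implementation is in bset.c:
 *
 *	struct float_key {
 *		u8	exponent;	// lowest key bit covered by the mantissa
 *		u16	mantissa;
 *	};
 *
 *	// Build the float for a tree node whose key is cur: prev is the key
 *	// immediately preceding cur in the set, lo and hi are the keys
 *	// bounding the node's subtree. Returns false if prev and cur don't
 *	// differ within the mantissa, i.e. the node must be flagged and
 *	// lookups must fall back to comparing against the real key:
 *	static bool make_float(struct float_key *f, u64 lo, u64 hi,
 *			       u64 prev, u64 cur)
 *	{
 *		f->exponent = max_t(int, (int) fls64(lo ^ hi) - 16, 0);
 *		f->mantissa = cur >> f->exponent;
 *
 *		return (u16) (prev >> f->exponent) != f->mantissa;
 *	}
 *
 *	// In a lookup, the search key and the node's key both lie between lo
 *	// and hi, so they agree on every bit above the mantissa window and
 *	// comparing truncated mantissas is equivalent to comparing full keys:
 *	static int float_cmp(const struct float_key *f, u64 search)
 *	{
 *		return cmp_int(f->mantissa, (u16) (search >> f->exponent));
 *	}
 */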

enum bset_aux_tree_type {
	BSET_NO_AUX_TREE,
	BSET_RO_AUX_TREE,
	BSET_RW_AUX_TREE,
};

#define BSET_TREE_NR_TYPES	3

#define BSET_NO_AUX_TREE_VAL	(U16_MAX)
#define BSET_RW_AUX_TREE_VAL	(U16_MAX - 1)

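/*
 * A bset_tree's t->extra doubles as a type tag: the two sentinel values above
 * mean no aux tree and a rw aux tree respectively; any other value means a ro
 * aux tree:
 */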
static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
{
	switch (t->extra) {
	case BSET_NO_AUX_TREE_VAL:
		EBUG_ON(t->size);
		return BSET_NO_AUX_TREE;
	case BSET_RW_AUX_TREE_VAL:
		EBUG_ON(!t->size);
		return BSET_RW_AUX_TREE;
	default:
		EBUG_ON(!t->size);
		return BSET_RO_AUX_TREE;
	}
}

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was bigger - it's now 256.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in the
 * auxiliary search tree - when we're done searching the bkey_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		256

static inline size_t btree_keys_cachelines(const struct btree *b)
{
	return (1U << b->byte_order) / BSET_CACHELINE;
}

static inline size_t btree_aux_data_bytes(const struct btree *b)
{
	return btree_keys_cachelines(b) * 8;
}

static inline size_t btree_aux_data_u64s(const struct btree *b)
{
	return btree_aux_data_bytes(b) / sizeof(u64);
}
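
/*
 * For example - assuming byte_order is log2 of the node size in bytes - a
 * 256k btree node (byte_order == 18) has 262144 / 256 = 1024 cachelines,
 * hence 8192 bytes (1024 u64s) of aux data.
 */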

#define for_each_bset(_b, _t)						\
	for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)

#define bset_tree_for_each_key(_b, _t, _k)				\
	for (_k = btree_bkey_first(_b, _t);				\
	     _k != btree_bkey_last(_b, _t);				\
	     _k = bkey_p_next(_k))
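
/*
 * Example usage, visiting every live key in every bset of a node
 * (do_something() is hypothetical):
 *
 *	struct bset_tree *t;
 *	struct bkey_packed *k;
 *
 *	for_each_bset(b, t)
 *		bset_tree_for_each_key(b, t, k)
 *			if (!bkey_deleted(k))
 *				do_something(k);
 */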

static inline bool bset_has_ro_aux_tree(const struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
}

static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
}

static inline void bch2_bset_set_no_aux_tree(struct btree *b,
					     struct bset_tree *t)
{
	BUG_ON(t < b->set);

	for (; t < b->set + ARRAY_SIZE(b->set); t++) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		t->aux_data_offset = U16_MAX;
	}
}

static inline void btree_node_set_format(struct btree *b,
					 struct bkey_format f)
{
	int len;

	b->format = f;
	b->nr_key_bits = bkey_format_key_bits(&f);

	len = bch2_compile_bkey_format(&b->format, b->aux_data);
	BUG_ON(len < 0 || len > U8_MAX);

	b->unpack_fn_len = len;

	bch2_bset_set_no_aux_tree(b, b->set);
}

static inline struct bset *bset_next_set(struct btree *b,
					 unsigned block_bytes)
{
	struct bset *i = btree_bset_last(b);

	EBUG_ON(!is_power_of_2(block_bytes));

	return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
}
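
/*
 * e.g. with 512 byte blocks, if the last bset currently occupies 700 bytes
 * the next bset would start at i + round_up(700, 512) = i + 1024 bytes.
 */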

void bch2_btree_keys_init(struct btree *);

void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);

void bch2_bset_insert(struct btree *, struct btree_node_iter *,
		      struct bkey_packed *, struct bkey_i *, unsigned);
void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);

/* Bkey utility code */

/* packed or unpacked */
static inline int bkey_cmp_p_or_unp(const struct btree *b,
				    const struct bkey_packed *l,
				    const struct bkey_packed *r_packed,
				    const struct bpos *r)
{
	EBUG_ON(r_packed && !bkey_packed(r_packed));

	if (unlikely(!bkey_packed(l)))
		return bpos_cmp(packed_to_bkey_c(l)->p, *r);

	if (likely(r_packed))
		return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);

	return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}

static inline struct bset_tree *
bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
{
	unsigned offset = __btree_node_key_to_offset(b, k);
	struct bset_tree *t;

	for_each_bset(b, t)
		if (offset <= t->end_offset) {
			EBUG_ON(offset < btree_bkey_first_offset(t));
			return t;
		}

	BUG();
}

struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);

struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
					  struct bkey_packed *, unsigned);
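
/*
 * The last argument to bch2_bkey_prev_filter() is a minimum key type:
 * bch2_bkey_prev_all() (minimum 0) returns deleted keys too, while
 * bch2_bkey_prev() (minimum 1) skips them, deleted keys having key type 0:
 */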

static inline struct bkey_packed *
bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
	return bch2_bkey_prev_filter(b, t, k, 0);
}

static inline struct bkey_packed *
bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
	return bch2_bkey_prev_filter(b, t, k, 1);
}

/* Btree key iteration */

void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
			       const struct bkey_packed *,
			       const struct bkey_packed *);
void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
			       struct bpos *);
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
					  struct btree *);
struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
						  struct btree *,
						  struct bset_tree *);

void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
void bch2_btree_node_iter_set_drop(struct btree_node_iter *,
				   struct btree_node_iter_set *);
void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);

#define btree_node_iter_for_each(_iter, _set)				\
	for (_set = (_iter)->data;					\
	     _set < (_iter)->data + ARRAY_SIZE((_iter)->data) &&	\
	     (_set)->k != (_set)->end;					\
	     _set++)

static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
					     unsigned i)
{
	return iter->data[i].k == iter->data[i].end;
}

static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
{
	return __btree_node_iter_set_end(iter, 0);
}

/*
 * When keys compare equal, deleted keys compare first:
 *
 * XXX: only need to compare pointers for keys that are both within a
 * btree_node_iterator - we need to break ties for prev() to work correctly
 */
static inline int bkey_iter_cmp(const struct btree *b,
				const struct bkey_packed *l,
				const struct bkey_packed *r)
{
	return bch2_bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
		?: cmp_int(l, r);
}

static inline int btree_node_iter_cmp(const struct btree *b,
				      struct btree_node_iter_set l,
				      struct btree_node_iter_set r)
{
	return bkey_iter_cmp(b,
			     __btree_node_offset_to_key(b, l.k),
			     __btree_node_offset_to_key(b, r.k));
}

/* These assume r (the search key) is not a deleted key: */
static inline int bkey_iter_pos_cmp(const struct btree *b,
				    const struct bkey_packed *l,
				    const struct bpos *r)
{
	return bkey_cmp_left_packed(b, l, r)
		?: -((int) bkey_deleted(l));
}

static inline int bkey_iter_cmp_p_or_unp(const struct btree *b,
					 const struct bkey_packed *l,
					 const struct bkey_packed *r_packed,
					 const struct bpos *r)
{
	return bkey_cmp_p_or_unp(b, l, r_packed, r)
		?: -((int) bkey_deleted(l));
}

static inline struct bkey_packed *
__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
				struct btree *b)
{
	return __btree_node_offset_to_key(b, iter->data->k);
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b)
{
	return !bch2_btree_node_iter_end(iter)
		? __btree_node_offset_to_key(b, iter->data->k)
		: NULL;
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
	struct bkey_packed *k;

	while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
	       bkey_deleted(k))
		bch2_btree_node_iter_advance(iter, b);

	return k;
}

static inline struct bkey_packed *
bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
{
	struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);

	if (ret)
		bch2_btree_node_iter_advance(iter, b);

	return ret;
}

struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
						  struct btree *);
struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
					      struct btree *);

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
						 struct btree *,
						 struct bkey *);

#define for_each_btree_node_key(b, k, iter)				\
	for (bch2_btree_node_iter_init_from_start((iter), (b));	\
	     (k = bch2_btree_node_iter_peek((iter), (b)));		\
	     bch2_btree_node_iter_advance(iter, b))

#define for_each_btree_node_key_unpack(b, k, iter, unpacked)		\
	for (bch2_btree_node_iter_init_from_start((iter), (b));	\
	     (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
	     bch2_btree_node_iter_advance(iter, b))
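
/*
 * Example usage (do_something_with() is hypothetical):
 *
 *	struct btree_node_iter iter;
 *	struct bkey unpacked;
 *	struct bkey_s_c k;
 *
 *	for_each_btree_node_key_unpack(b, k, &iter, &unpacked)
 *		do_something_with(k);
 */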

/* Accounting: */

struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);

static inline void btree_keys_account_key(struct btree_nr_keys *n,
					  unsigned bset,
					  struct bkey_packed *k,
					  int sign)
{
	n->live_u64s += k->u64s * sign;
	n->bset_u64s[bset] += k->u64s * sign;

	if (bkey_packed(k))
		n->packed_keys += sign;
	else
		n->unpacked_keys += sign;
}

static inline void btree_keys_account_val_delta(struct btree *b,
						struct bkey_packed *k,
						int delta)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);

	b->nr.live_u64s += delta;
	b->nr.bset_u64s[t - b->set] += delta;
}

#define btree_keys_account_key_add(_nr, _bset_idx, _k)		\
	btree_keys_account_key(_nr, _bset_idx, _k, 1)
#define btree_keys_account_key_drop(_nr, _bset_idx, _k)		\
	btree_keys_account_key(_nr, _bset_idx, _k, -1)

#define btree_account_key_add(_b, _k)				\
	btree_keys_account_key(&(_b)->nr,			\
		bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
#define btree_account_key_drop(_b, _k)				\
	btree_keys_account_key(&(_b)->nr,			\
		bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)

struct bset_stats {
	struct {
		size_t nr, bytes;
	} sets[BSET_TREE_NR_TYPES];

	size_t floats;
	size_t failed;
};

void bch2_btree_keys_stats(const struct btree *, struct bset_stats *);
void bch2_bfloat_to_text(struct printbuf *, struct btree *,
			 struct bkey_packed *);

/* Debug stuff */

void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
void bch2_dump_btree_node(struct bch_fs *, struct btree *);
void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *);
void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
void bch2_verify_insert_pos(struct btree *, struct bkey_packed *,
			    struct bkey_packed *, unsigned);

#else

static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
					       struct btree *b) {}
static inline void bch2_verify_insert_pos(struct btree *b,
					  struct bkey_packed *where,
					  struct bkey_packed *insert,
					  unsigned clobber_u64s) {}
#endif

static inline void bch2_verify_btree_nr_keys(struct btree *b)
{
	if (bch2_debug_check_btree_accounting)
		__bch2_verify_btree_nr_keys(b);
}

#endif /* _BCACHEFS_BSET_H */