// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		pr_err("block %u key %u/%u: ", set,
		       (unsigned int) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			pr_err("Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned int i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned int ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}

/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned int i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned int len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned int	exponent:BKEY_EXPONENT_BITS;
	unsigned int	m:BKEY_MID_BITS;
	unsigned int	mantissa:BKEY_MANTISSA_BITS;
} __packed;
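
/*
 * Illustrative sanity sketch (not part of the driver): the three bitfields
 * above are meant to pack into exactly 32 bits - 7 exponent bits, 3 bits
 * for m, and 22 mantissa bits:
 */
#if 0
static void bkey_float_layout_check(void)
{
	BUILD_BUG_ON(BKEY_EXPONENT_BITS + BKEY_MID_BITS +
		     BKEY_MANTISSA_BITS != 32);
	BUILD_BUG_ON(sizeof(struct bkey_float) != sizeof(uint32_t));
}
#endif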

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}
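
/*
 * Worked example (illustrative, assuming 4KiB pages): with page_order == 0,
 * btree_keys_bytes() == 4096, so btree_keys_cachelines() == 4096 / 128 == 32,
 * bset_tree_bytes() == 32 * sizeof(struct bkey_float) == 128, and
 * bset_prev_bytes() == 32.
 */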

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}

int bch_btree_keys_alloc(struct btree_keys *b,
			 unsigned int page_order,
			 gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/*
	 * struct btree_keys is embedded in struct btree, and struct
	 * bset_tree is embedded into struct btree_keys. They are all
	 * initialized as 0 by kzalloc() in mca_bucket_alloc(), and
	 * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
	 * don't have to initialize b->set[].size and b->set[].data here
	 * any more.
	 */
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Return the array index that follows j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

/*
 * Return the array index that precedes j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}
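
/*
 * Worked example (illustrative): for size == 8 the tree occupies array
 * indices 1..7, with root 1, children 2 and 3, and leaves 4..7. Starting
 * from 0, repeated inorder_next() visits 4, 2, 5, 1, 6, 3, 7 - a
 * left-to-right in-order walk - and inorder_prev() walks the same
 * sequence in reverse.
 */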

/*
 * I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, size up to size somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned int __to_inorder(unsigned int j,
				 unsigned int size,
				 unsigned int extra)
{
	unsigned int b = fls(j);
	unsigned int shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

/*
 * Return the cacheline index in bset_tree->data, where j is the index of
 * a node in the linear array that stores the auxiliary binary tree
 */
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned int __inorder_to_tree(unsigned int j,
				      unsigned int size,
				      unsigned int extra)
{
	unsigned int shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}

/*
 * Return an index into the linear array that stores the auxiliary binary
 * tree, where j is the cacheline index of t->data.
 */
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned int size = 2;
	     size < 65536000;
	     size++) {
		unsigned int extra =
			(size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			pr_notice("loop %u, %llu per us\n", size,
				  done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t,
				      unsigned int cacheline,
				      unsigned int offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}
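
/*
 * Worked example (illustrative): cacheline_to_bkey(t, 2, 3) points
 * 2 * BSET_CACHELINE + 3 * 8 == 280 bytes into t->data, i.e. at u64
 * offset 3 within cacheline 2 (both zero-based).
 */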

static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
					     unsigned int cacheline,
					     struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}
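
/*
 * shrd128() mimics the x86 SHRD instruction: it shifts the 128-bit value
 * (high:low) right by 'shift' (0..63) and returns the low 64 bits. Writing
 * the high half as (high << 1) << (63 - shift) rather than
 * high << (64 - shift) avoids an undefined 64-bit shift when shift == 0.
 * E.g. (illustrative) shrd128(0x1, 0x0, 4) == 0x1000000000000000.
 */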

/*
 * Calculate the mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of
 *    bkey->high too.
 * See make_bfloat() to check when the most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned int bfloat_mantissa(const struct bkey *k,
					   struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);

	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned int j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	/*
	 * If l and r have different KEY_INODE values (different backing
	 * devices), f->exponent records how many least significant bits of
	 * the KEY_INODE values differ, and sets the most significant bit
	 * to 1 (by adding 64).
	 * If l and r have the same KEY_INODE value, f->exponent records
	 * how many bits differ in the least significant bits of bkey->low.
	 * See bfloat_mantissa() for how the most significant bit of
	 * f->exponent is used to calculate the bfloat mantissa value.
	 */
	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}
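
/*
 * Worked example (illustrative): if l and r share KEY_INODE() and
 * r->low ^ l->low has its highest set bit at bit 29, fls64() returns 30,
 * so f->exponent == 30 - BKEY_MANTISSA_BITS == 8 and the mantissa is
 * taken from bits 8..29 of the key being encoded.
 */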

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned int j = roundup(t[-1].size,
					 64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = magic;
	i->version = 0;
	i->keys = 0;

	bch_bset_build_unwritten_tree(b);
}

/*
 * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used
 * to accelerate bkey search in a btree node (pointed to by bset_tree->data
 * in memory). After searching the auxiliary tree by calling
 * bset_search_tree(), a struct bset_search_iter is returned which indicates
 * the range [l, r] of bset_tree->data within which the search key may lie.
 * A following linear search then finds the exact key; see
 * __bch_bset_search() for how the auxiliary tree is used.
 */
void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned int j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned int,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline) {
			prev = k;
			k = bkey_next(k);
		}

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned int inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned int shift = bkey_u64s(k);
	unsigned int j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/*
	 * Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] =
				bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}

unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;
	struct bkey preceding_key_on_stack = ZERO_KEY;
	struct bkey *preceding_key_p = &preceding_key_on_stack;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	/*
	 * If k has a preceding key, preceding_key_p will be set to the
	 * address of k's preceding key; otherwise preceding_key_p will be
	 * set to NULL inside preceding_key().
	 */
	if (b->ops->is_extents)
		preceding_key(&START_KEY(k), &preceding_key_p);
	else
		preceding_key(k, &preceding_key_p);

	m = bch_btree_iter_init(b, &iter, preceding_key_p);

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
		prev = m;
		m = bkey_next(m);
	}

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}

/* Lookup */

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned int li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned int m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned int inorder, j, n = 1;

	do {
		unsigned int p = n << 4;

		if (p < t->size)
			prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		if (likely(f->exponent != 127)) {
			if (f->mantissa >= bfloat_mantissa(search, f))
				n = j * 2;
			else
				n = j * 2 + 1;
		} else {
			if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				n = j * 2;
			else
				n = j * 2 + 1;
		}
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set b __maybe_unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, b, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	return mempool_init_page_pool(&state->pool, 1, page_order);
}

static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys\n", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned int start, unsigned int order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(&state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 *
		 * Don't worry if 'out' was allocated from the mempool; it can
		 * still be swapped here, because state->pool is a page mempool
		 * created by mempool_init_page_pool(), which really does
		 * allocate pages with alloc_pages().
		 */

		out->magic = b->set->data->magic;
		out->seq = b->set->data->seq;
		out->version = b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), &state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned int i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0; // XXX: why?
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))
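
/*
 * Worked example (illustrative): SORT_CRIT is 4096 / 8 == 512 keys, and
 * with page_order == 3, crit_factor == int_sqrt(8) == 2, so
 * bch_btree_sort_lazy() below merges from set i onwards whenever set i
 * holds fewer than 512 * 2^(b->nsets - i) keys.
 */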

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned int crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned int i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}