1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _BCACHE_BTREE_H |
3 | #define _BCACHE_BTREE_H |
4 | |
5 | /* |
6 | * THE BTREE: |
7 | * |
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and
9 | * pointers are in the leaves; interior nodes only have pointers to the child |
10 | * nodes. |
11 | * |
12 | * In the interior nodes, a struct bkey always points to a child btree node, and |
13 | * the key is the highest key in the child node - except that the highest key in |
14 | * an interior node is always MAX_KEY. The size field refers to the size on disk |
15 | * of the child node - this would allow us to have variable sized btree nodes |
 * (handy for keeping the depth of the btree at 1 by expanding just the root).
17 | * |
18 | * Btree nodes are themselves log structured, but this is hidden fairly |
19 | * thoroughly. Btree nodes on disk will in practice have extents that overlap |
20 | * (because they were written at different times), but in memory we never have |
21 | * overlapping extents - when we read in a btree node from disk, the first thing |
 * we do is re-sort all the sets of keys with a mergesort, and in the same pass
23 | * we check for overlapping extents and adjust them appropriately. |
24 | * |
25 | * struct btree_op is a central interface to the btree code. It's used for |
26 | * specifying read vs. write locking, and the embedded closure is used for |
 * waiting on IO or on the memory reserve.
28 | * |
29 | * BTREE CACHE: |
30 | * |
31 | * Btree nodes are cached in memory; traversing the btree might require reading |
 * in btree nodes, which is handled mostly transparently.
33 | * |
34 | * bch_btree_node_get() looks up a btree node in the cache and reads it in from |
 * disk if necessary. This function is almost never called directly though -
 * the bcache_btree() macro is used to get a btree node, call some function
 * on it, and unlock the node after the function returns.
38 | * |
39 | * The root is special cased - it's taken out of the cache's lru (thus pinning |
40 | * it in memory), so we can find the root of the btree by just dereferencing a |
41 | * pointer instead of looking it up in the cache. This makes locking a bit |
42 | * tricky, since the root pointer is protected by the lock in the btree node it |
 * points to - the bcache_btree_root() macro handles this.
44 | * |
45 | * In various places we must be able to allocate memory for multiple btree nodes |
46 | * in order to make forward progress. To do this we use the btree cache itself |
47 | * as a reserve; if __get_free_pages() fails, we'll find a node in the btree |
48 | * cache we can reuse. We can't allow more than one thread to be doing this at a |
49 | * time, so there's a lock, implemented by a pointer to the btree_op closure - |
 * this allows the bcache_btree_root() macro to implicitly release this lock.
51 | * |
52 | * BTREE IO: |
53 | * |
54 | * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles |
55 | * this. |
56 | * |
 * For writing, we have two btree_write structs embedded in struct btree - one
 * for the write currently in flight and one being set up; we toggle between
 * them.
59 | * |
60 | * Writing is done with a single function - bch_btree_write() really serves two |
61 | * different purposes and should be broken up into two different functions. When |
62 | * passing now = false, it merely indicates that the node is now dirty - calling |
63 | * it ensures that the dirty keys will be written at some point in the future. |
64 | * |
65 | * When passing now = true, bch_btree_write() causes a write to happen |
66 | * "immediately" (if there was already a write in flight, it'll cause the write |
 * to happen as soon as the previous write completes). It returns immediately,
 * but it takes a refcount on the closure in struct btree_op you passed to it,
 * so a closure_sync() later can be used to wait for the write to complete.
71 | * |
72 | * This is handy because btree_split() and garbage collection can issue writes |
73 | * in parallel, reducing the amount of time they have to hold write locks. |
74 | * |
75 | * LOCKING: |
76 | * |
77 | * When traversing the btree, we may need write locks starting at some level - |
78 | * inserting a key into the btree will typically only require a write lock on |
79 | * the leaf node. |
80 | * |
81 | * This is specified with the lock field in struct btree_op; lock = 0 means we |
82 | * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get() |
83 | * checks this field and returns the node with the appropriate lock held. |
84 | * |
 * If, after traversing the btree, the insertion code discovers it has to
 * split, it must restart from the root and take new locks - to do this it
 * changes the lock field and returns -EINTR, which causes the
 * bcache_btree_root() macro to loop.
89 | * |
 * Handling cache misses requires a different mechanism for upgrading to a write
91 | * lock. We do cache lookups with only a read lock held, but if we get a cache |
92 | * miss and we wish to insert this data into the cache, we have to insert a |
93 | * placeholder key to detect races - otherwise, we could race with a write and |
94 | * overwrite the data that was just written to the cache with stale data from |
95 | * the backing device. |
96 | * |
 * For this we use a sequence number that write locks and unlocks increment -
 * to insert the check key, we drop the read lock, retake the node with a
 * write lock, and fail if the sequence number no longer matches.
100 | */ |
101 | |
102 | #include "bset.h" |
103 | #include "debug.h" |
104 | |
105 | struct btree_write { |
106 | atomic_t *journal; |
107 | |
108 | /* If btree_split() frees a btree node, it writes a new pointer to that |
109 | * btree node indicating it was freed; it takes a refcount on |
110 | * c->prio_blocked because we can't write the gens until the new |
111 | * pointer is on disk. This allows btree_write_endio() to release the |
112 | * refcount that btree_split() took. |
113 | */ |
114 | int prio_blocked; |
115 | }; |
116 | |
117 | struct btree { |
118 | /* Hottest entries first */ |
119 | struct hlist_node hash; |
120 | |
121 | /* Key/pointer for this btree node */ |
122 | BKEY_PADDED(key); |
123 | |
124 | unsigned long seq; |
125 | struct rw_semaphore lock; |
126 | struct cache_set *c; |
127 | struct btree *parent; |
128 | |
129 | struct mutex write_lock; |
130 | |
131 | unsigned long flags; |
132 | uint16_t written; /* would be nice to kill */ |
133 | uint8_t level; |
134 | |
135 | struct btree_keys keys; |
136 | |
137 | /* For outstanding btree writes, used as a lock - protects write_idx */ |
138 | struct closure io; |
139 | struct semaphore io_mutex; |
140 | |
141 | struct list_head list; |
142 | struct delayed_work work; |
143 | |
144 | struct btree_write writes[2]; |
145 | struct bio *bio; |
146 | }; |
147 | |
151 | #define BTREE_FLAG(flag) \ |
152 | static inline bool btree_node_ ## flag(struct btree *b) \ |
153 | { return test_bit(BTREE_NODE_ ## flag, &b->flags); } \ |
154 | \ |
155 | static inline void set_btree_node_ ## flag(struct btree *b) \ |
156 | { set_bit(BTREE_NODE_ ## flag, &b->flags); } |
157 | |
158 | enum btree_flags { |
159 | BTREE_NODE_io_error, |
160 | BTREE_NODE_dirty, |
161 | BTREE_NODE_write_idx, |
162 | BTREE_NODE_journal_flush, |
163 | }; |
164 | |
165 | BTREE_FLAG(io_error); |
166 | BTREE_FLAG(dirty); |
167 | BTREE_FLAG(write_idx); |
168 | BTREE_FLAG(journal_flush); |
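
/*
 * For reference, BTREE_FLAG(dirty) above expands to:
 *
 *	static inline bool btree_node_dirty(struct btree *b)
 *	{ return test_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void set_btree_node_dirty(struct btree *b)
 *	{ set_bit(BTREE_NODE_dirty, &b->flags); }
 */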
169 | |
170 | static inline struct btree_write *btree_current_write(struct btree *b) |
171 | { |
172 | return b->writes + btree_node_write_idx(b); |
173 | } |
174 | |
175 | static inline struct btree_write *btree_prev_write(struct btree *b) |
176 | { |
177 | return b->writes + (btree_node_write_idx(b) ^ 1); |
178 | } |
179 | |
180 | static inline struct bset *btree_bset_first(struct btree *b) |
181 | { |
182 | return b->keys.set->data; |
183 | } |
184 | |
185 | static inline struct bset *btree_bset_last(struct btree *b) |
186 | { |
	return bset_tree_last(&b->keys)->data;
188 | } |
189 | |
190 | static inline unsigned int bset_block_offset(struct btree *b, struct bset *i) |
191 | { |
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
193 | } |
194 | |
/* Trigger a gc pass once roughly 1/16th of the cache has been written */
static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}
199 | |
200 | void bkey_put(struct cache_set *c, struct bkey *k); |
201 | |
202 | /* Looping macros */ |
203 | |
204 | #define for_each_cached_btree(b, c, iter) \ |
205 | for (iter = 0; \ |
206 | iter < ARRAY_SIZE((c)->bucket_hash); \ |
207 | iter++) \ |
208 | hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash) |
209 | |
210 | /* Recursing down the btree */ |
211 | |
212 | struct btree_op { |
213 | /* for waiting on btree reserve in btree_split() */ |
214 | wait_queue_entry_t wait; |
215 | |
216 | /* Btree level at which we start taking write locks */ |
217 | short lock; |
218 | |
219 | unsigned int insert_collision:1; |
220 | }; |
221 | |
222 | struct btree_check_state; |
223 | struct btree_check_info { |
224 | struct btree_check_state *state; |
225 | struct task_struct *thread; |
226 | int result; |
227 | }; |
228 | |
229 | #define BCH_BTR_CHKTHREAD_MAX 12 |
230 | struct btree_check_state { |
231 | struct cache_set *c; |
232 | int total_threads; |
233 | int key_idx; |
234 | spinlock_t idx_lock; |
235 | atomic_t started; |
236 | atomic_t enough; |
237 | wait_queue_head_t wait; |
238 | struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX]; |
239 | }; |
240 | |
241 | static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) |
242 | { |
243 | memset(op, 0, sizeof(struct btree_op)); |
244 | init_wait(&op->wait); |
245 | op->lock = write_lock_level; |
246 | } |
247 | |
248 | static inline void rw_lock(bool w, struct btree *b, int level) |
249 | { |
	w ? down_write(&b->lock)
	  : down_read(&b->lock);
252 | if (w) |
253 | b->seq++; |
254 | } |
255 | |
256 | static inline void rw_unlock(bool w, struct btree *b) |
257 | { |
258 | if (w) |
259 | b->seq++; |
260 | (w ? up_write : up_read)(&b->lock); |
261 | } |
262 | |
263 | void bch_btree_node_read_done(struct btree *b); |
264 | void __bch_btree_node_write(struct btree *b, struct closure *parent); |
265 | void bch_btree_node_write(struct btree *b, struct closure *parent); |
266 | |
267 | void bch_btree_set_root(struct btree *b); |
268 | struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, |
269 | int level, bool wait, |
270 | struct btree *parent); |
271 | struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, |
272 | struct bkey *k, int level, bool write, |
273 | struct btree *parent); |
274 | |
275 | int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, |
276 | struct bkey *check_key); |
277 | int bch_btree_insert(struct cache_set *c, struct keylist *keys, |
278 | atomic_t *journal_ref, struct bkey *replace_key); |
279 | |
280 | int bch_gc_thread_start(struct cache_set *c); |
281 | void bch_initial_gc_finish(struct cache_set *c); |
282 | void bch_moving_gc(struct cache_set *c); |
283 | int bch_btree_check(struct cache_set *c); |
284 | void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); |
285 | void bch_cannibalize_unlock(struct cache_set *c); |
286 | |
287 | static inline void wake_up_gc(struct cache_set *c) |
288 | { |
289 | wake_up(&c->gc_wait); |
290 | } |
291 | |
292 | static inline void force_wake_up_gc(struct cache_set *c) |
293 | { |
294 | /* |
295 | * Garbage collection thread only works when sectors_to_gc < 0, |
296 | * calling wake_up_gc() won't start gc thread if sectors_to_gc is |
297 | * not a nagetive value. |
298 | * Therefore sectors_to_gc is set to -1 here, before waking up |
299 | * gc thread by calling wake_up_gc(). Then gc_should_run() will |
300 | * give a chance to permit gc thread to run. "Give a chance" means |
301 | * before going into gc_should_run(), there is still possibility |
302 | * that c->sectors_to_gc being set to other positive value. So |
303 | * this routine won't 100% make sure gc thread will be woken up |
304 | * to run. |
305 | */ |
306 | atomic_set(v: &c->sectors_to_gc, i: -1); |
307 | wake_up_gc(c); |
308 | } |
309 | |
310 | /* |
311 | * These macros are for recursing down the btree - they handle the details of |
312 | * locking and looking up nodes in the cache for you. They're best treated as |
313 | * mere syntax when reading code that uses them. |
314 | * |
315 | * op->lock determines whether we take a read or a write lock at a given depth. |
316 | * If you've got a read lock and find that you need a write lock (i.e. you're |
 * going to have to split), set op->lock and return -EINTR;
 * bcache_btree_root() will call you again and you'll have the correct lock.
319 | */ |
320 | |
321 | /** |
 * bcache_btree - recurse down the btree on a specified key
323 | * @fn: function to call, which will be passed the child node |
324 | * @key: key to recurse on |
325 | * @b: parent btree node |
326 | * @op: pointer to struct btree_op |
327 | */ |
328 | #define bcache_btree(fn, key, b, op, ...) \ |
329 | ({ \ |
330 | int _r, l = (b)->level - 1; \ |
331 | bool _w = l <= (op)->lock; \ |
332 | struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ |
333 | _w, b); \ |
334 | if (!IS_ERR(_child)) { \ |
335 | _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ |
336 | rw_unlock(_w, _child); \ |
337 | } else \ |
338 | _r = PTR_ERR(_child); \ |
339 | _r; \ |
340 | }) |
341 | |
342 | /** |
 * bcache_btree_root - call a function on the root of the btree
 * @fn: function to call, which will be passed the root node
345 | * @c: cache set |
346 | * @op: pointer to struct btree_op |
347 | */ |
348 | #define bcache_btree_root(fn, c, op, ...) \ |
349 | ({ \ |
350 | int _r = -EINTR; \ |
351 | do { \ |
352 | struct btree *_b = (c)->root; \ |
353 | bool _w = insert_lock(op, _b); \ |
354 | rw_lock(_w, _b, _b->level); \ |
355 | if (_b == (c)->root && \ |
356 | _w == insert_lock(op, _b)) { \ |
357 | _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ |
358 | } \ |
359 | rw_unlock(_w, _b); \ |
360 | bch_cannibalize_unlock(c); \ |
361 | if (_r == -EINTR) \ |
362 | schedule(); \ |
363 | } while (_r == -EINTR); \ |
364 | \ |
365 | finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ |
366 | _r; \ |
367 | }) |
368 | |
369 | #define MAP_DONE 0 |
370 | #define MAP_CONTINUE 1 |
371 | |
372 | #define MAP_ALL_NODES 0 |
373 | #define MAP_LEAF_NODES 1 |
374 | |
375 | #define MAP_END_KEY 1 |
376 | |
377 | typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b); |
378 | int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, |
379 | struct bkey *from, btree_map_nodes_fn *fn, int flags); |
380 | |
381 | static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, |
382 | struct bkey *from, btree_map_nodes_fn *fn) |
383 | { |
384 | return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES); |
385 | } |
386 | |
387 | static inline int bch_btree_map_leaf_nodes(struct btree_op *op, |
388 | struct cache_set *c, |
389 | struct bkey *from, |
390 | btree_map_nodes_fn *fn) |
391 | { |
392 | return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); |
393 | } |
394 | |
395 | typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b, |
396 | struct bkey *k); |
397 | int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, |
398 | struct bkey *from, btree_map_keys_fn *fn, int flags); |
399 | int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, |
400 | struct bkey *from, btree_map_keys_fn *fn, |
401 | int flags); |
402 | |
403 | typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k); |
404 | |
405 | void bch_keybuf_init(struct keybuf *buf); |
406 | void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, |
407 | struct bkey *end, keybuf_pred_fn *pred); |
408 | bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, |
409 | struct bkey *end); |
410 | void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w); |
411 | struct keybuf_key *bch_keybuf_next(struct keybuf *buf); |
412 | struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, |
413 | struct keybuf *buf, |
414 | struct bkey *end, |
415 | keybuf_pred_fn *pred); |
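
/*
 * A keybuf_pred_fn decides which keys bch_refill_keybuf() should add to the
 * keybuf. A minimal sketch, along the lines of dirty_pred() in writeback.c:
 *
 *	static bool pick_dirty(struct keybuf *buf, struct bkey *k)
 *	{
 *		return KEY_DIRTY(k);
 *	}
 */
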
416 | void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); |
#endif /* _BCACHE_BTREE_H */
418 | |