1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2009-2011 Red Hat, Inc. |
4 | * |
5 | * Author: Mikulas Patocka <mpatocka@redhat.com> |
6 | * |
7 | * This file is released under the GPL. |
8 | */ |
9 | |
10 | #include <linux/dm-bufio.h> |
11 | |
12 | #include <linux/device-mapper.h> |
13 | #include <linux/dm-io.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/sched/mm.h> |
16 | #include <linux/jiffies.h> |
17 | #include <linux/vmalloc.h> |
18 | #include <linux/shrinker.h> |
19 | #include <linux/module.h> |
20 | #include <linux/rbtree.h> |
21 | #include <linux/stacktrace.h> |
22 | #include <linux/jump_label.h> |
23 | |
24 | #include "dm.h" |
25 | |
26 | #define DM_MSG_PREFIX "bufio" |
27 | |
28 | /* |
29 | * Memory management policy: |
30 | * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory |
31 | * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower). |
32 | * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers. |
33 |  * Start background writeback when the number of dirty buffers exceeds |
34 |  * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers. |
35 | */ |
36 | #define DM_BUFIO_MIN_BUFFERS 8 |
37 | |
38 | #define DM_BUFIO_MEMORY_PERCENT 2 |
39 | #define DM_BUFIO_VMALLOC_PERCENT 25 |
40 | #define DM_BUFIO_WRITEBACK_RATIO 3 |
41 | #define DM_BUFIO_LOW_WATERMARK_RATIO 16 |
42 | |
43 | /* |
44 | * Check buffer ages in this interval (seconds) |
45 | */ |
46 | #define DM_BUFIO_WORK_TIMER_SECS 30 |
47 | |
48 | /* |
49 | * Free buffers when they are older than this (seconds) |
50 | */ |
51 | #define DM_BUFIO_DEFAULT_AGE_SECS 300 |
52 | |
53 | /* |
54 |  * The number of bytes of cached data to keep around. |
55 | */ |
56 | #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024) |
57 | |
58 | /* |
59 | * Align buffer writes to this boundary. |
60 | * Tests show that SSDs have the highest IOPS when using 4k writes. |
61 | */ |
62 | #define DM_BUFIO_WRITE_ALIGN 4096 |
63 | |
64 | /* |
65 | * dm_buffer->list_mode |
66 | */ |
67 | #define LIST_CLEAN 0 |
68 | #define LIST_DIRTY 1 |
69 | #define LIST_SIZE 2 |
70 | |
71 | /*--------------------------------------------------------------*/ |
72 | |
73 | /* |
74 | * Rather than use an LRU list, we use a clock algorithm where entries |
75 | * are held in a circular list. When an entry is 'hit' a reference bit |
76 | * is set. The least recently used entry is approximated by running a |
77 | * cursor around the list selecting unreferenced entries. Referenced |
78 | * entries have their reference bit cleared as the cursor passes them. |
79 | */ |
80 | struct lru_entry { |
81 | struct list_head list; |
82 | atomic_t referenced; |
83 | }; |
84 | |
85 | struct lru_iter { |
86 | struct lru *lru; |
87 | struct list_head list; |
88 | struct lru_entry *stop; |
89 | struct lru_entry *e; |
90 | }; |
91 | |
92 | struct lru { |
93 | struct list_head *cursor; |
94 | unsigned long count; |
95 | |
96 | struct list_head iterators; |
97 | }; |
98 | |
99 | /*--------------*/ |
100 | |
101 | static void lru_init(struct lru *lru) |
102 | { |
103 | lru->cursor = NULL; |
104 | lru->count = 0; |
105 | 	INIT_LIST_HEAD(&lru->iterators); |
106 | } |
107 | |
108 | static void lru_destroy(struct lru *lru) |
109 | { |
110 | WARN_ON_ONCE(lru->cursor); |
111 | WARN_ON_ONCE(!list_empty(&lru->iterators)); |
112 | } |
113 | |
114 | /* |
115 | * Insert a new entry into the lru. |
116 | */ |
117 | static void lru_insert(struct lru *lru, struct lru_entry *le) |
118 | { |
119 | /* |
120 | * Don't be tempted to set to 1, makes the lru aspect |
121 | * perform poorly. |
122 | */ |
123 | 	atomic_set(&le->referenced, 0); |
124 | |
125 | if (lru->cursor) { |
126 | 		list_add_tail(&le->list, lru->cursor); |
127 | } else { |
128 | 		INIT_LIST_HEAD(&le->list); |
129 | lru->cursor = &le->list; |
130 | } |
131 | lru->count++; |
132 | } |
133 | |
134 | /*--------------*/ |
135 | |
136 | /* |
137 | * Convert a list_head pointer to an lru_entry pointer. |
138 | */ |
139 | static inline struct lru_entry *to_le(struct list_head *l) |
140 | { |
141 | return container_of(l, struct lru_entry, list); |
142 | } |
143 | |
144 | /* |
145 | * Initialize an lru_iter and add it to the list of cursors in the lru. |
146 | */ |
147 | static void lru_iter_begin(struct lru *lru, struct lru_iter *it) |
148 | { |
149 | it->lru = lru; |
150 | 	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL; |
151 | 	it->e = lru->cursor ? to_le(lru->cursor) : NULL; |
152 | 	list_add(&it->list, &lru->iterators); |
153 | } |
154 | |
155 | /* |
156 | * Remove an lru_iter from the list of cursors in the lru. |
157 | */ |
158 | static inline void lru_iter_end(struct lru_iter *it) |
159 | { |
160 | 	list_del(&it->list); |
161 | } |
162 | |
163 | /* Predicate function type to be used with lru_iter_next */ |
164 | typedef bool (*iter_predicate)(struct lru_entry *le, void *context); |
165 | |
166 | /* |
167 | * Advance the cursor to the next entry that passes the |
168 | * predicate, and return that entry. Returns NULL if the |
169 | * iteration is complete. |
170 | */ |
171 | static struct lru_entry *lru_iter_next(struct lru_iter *it, |
172 | iter_predicate pred, void *context) |
173 | { |
174 | struct lru_entry *e; |
175 | |
176 | while (it->e) { |
177 | e = it->e; |
178 | |
179 | /* advance the cursor */ |
180 | if (it->e == it->stop) |
181 | it->e = NULL; |
182 | else |
183 | 			it->e = to_le(it->e->list.next); |
184 | |
185 | if (pred(e, context)) |
186 | return e; |
187 | } |
188 | |
189 | return NULL; |
190 | } |
191 | |
192 | /* |
193 | * Invalidate a specific lru_entry and update all cursors in |
194 | * the lru accordingly. |
195 | */ |
196 | static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e) |
197 | { |
198 | struct lru_iter *it; |
199 | |
200 | list_for_each_entry(it, &lru->iterators, list) { |
201 | 		/* Move it->e forwards if necessary. */ |
202 | 		if (it->e == e) { |
203 | 			it->e = to_le(it->e->list.next); |
204 | 			if (it->e == e) |
205 | 				it->e = NULL; |
206 | 		} |
207 | |
208 | 		/* Move it->stop backwards if necessary. */ |
209 | 		if (it->stop == e) { |
210 | 			it->stop = to_le(it->stop->list.prev); |
211 | if (it->stop == e) |
212 | it->stop = NULL; |
213 | } |
214 | } |
215 | } |
216 | |
217 | /*--------------*/ |
218 | |
219 | /* |
220 | * Remove a specific entry from the lru. |
221 | */ |
222 | static void lru_remove(struct lru *lru, struct lru_entry *le) |
223 | { |
224 | 	lru_iter_invalidate(lru, le); |
225 | if (lru->count == 1) { |
226 | lru->cursor = NULL; |
227 | } else { |
228 | if (lru->cursor == &le->list) |
229 | lru->cursor = lru->cursor->next; |
230 | 		list_del(&le->list); |
231 | } |
232 | lru->count--; |
233 | } |
234 | |
235 | /* |
236 | * Mark as referenced. |
237 | */ |
238 | static inline void lru_reference(struct lru_entry *le) |
239 | { |
240 | 	atomic_set(&le->referenced, 1); |
241 | } |
242 | |
243 | /*--------------*/ |
244 | |
245 | /* |
246 |  * Remove the (approximately) least recently used entry that passes the predicate. |
247 | * Returns NULL on failure. |
248 | */ |
249 | enum evict_result { |
250 | ER_EVICT, |
251 | ER_DONT_EVICT, |
252 | ER_STOP, /* stop looking for something to evict */ |
253 | }; |
254 | |
255 | typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context); |
256 | |
257 | static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep) |
258 | { |
259 | unsigned long tested = 0; |
260 | struct list_head *h = lru->cursor; |
261 | struct lru_entry *le; |
262 | |
263 | if (!h) |
264 | return NULL; |
265 | /* |
266 | * In the worst case we have to loop around twice. Once to clear |
267 | * the reference flags, and then again to discover the predicate |
268 | * fails for all entries. |
269 | */ |
270 | while (tested < lru->count) { |
271 | le = container_of(h, struct lru_entry, list); |
272 | |
273 | 		if (atomic_read(&le->referenced)) { |
274 | 			atomic_set(&le->referenced, 0); |
275 | } else { |
276 | tested++; |
277 | switch (pred(le, context)) { |
278 | case ER_EVICT: |
279 | /* |
280 | * Adjust the cursor, so we start the next |
281 | * search from here. |
282 | */ |
283 | lru->cursor = le->list.next; |
284 | lru_remove(lru, le); |
285 | return le; |
286 | |
287 | case ER_DONT_EVICT: |
288 | break; |
289 | |
290 | case ER_STOP: |
291 | lru->cursor = le->list.next; |
292 | return NULL; |
293 | } |
294 | } |
295 | |
296 | h = h->next; |
297 | |
298 | if (!no_sleep) |
299 | cond_resched(); |
300 | } |
301 | |
302 | return NULL; |
303 | } |
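
/*
 * Illustrative sketch (not compiled into the driver): one way the clock
 * lru above could be exercised. The demo_entry type and the always_evict()
 * predicate are hypothetical, added purely for demonstration.
 */
#if 0
struct demo_entry {
	struct lru_entry le;
	int payload;
};

static enum evict_result always_evict(struct lru_entry *le, void *context)
{
	return ER_EVICT;
}

static void demo_lru_usage(void)
{
	struct lru lru;
	struct demo_entry e1, e2;
	struct lru_entry *victim;

	lru_init(&lru);
	lru_insert(&lru, &e1.le);
	lru_insert(&lru, &e2.le);

	/* A 'hit' only sets the reference bit; the entry is not moved. */
	lru_reference(&e1.le);

	/*
	 * The cursor sweep clears e1's reference bit and evicts e2,
	 * approximating least-recently-used order. lru_evict() also
	 * removes the victim from the list.
	 */
	victim = lru_evict(&lru, always_evict, NULL, false);

	lru_remove(&lru, &e1.le);
	lru_destroy(&lru);
}
#endif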
304 | |
305 | /*--------------------------------------------------------------*/ |
306 | |
307 | /* |
308 | * Buffer state bits. |
309 | */ |
310 | #define B_READING 0 |
311 | #define B_WRITING 1 |
312 | #define B_DIRTY 2 |
313 | |
314 | /* |
315 | * Describes how the block was allocated: |
316 | * kmem_cache_alloc(), __get_free_pages() or vmalloc(). |
317 | * See the comment at alloc_buffer_data. |
318 | */ |
319 | enum data_mode { |
320 | DATA_MODE_SLAB = 0, |
321 | DATA_MODE_GET_FREE_PAGES = 1, |
322 | DATA_MODE_VMALLOC = 2, |
323 | DATA_MODE_LIMIT = 3 |
324 | }; |
325 | |
326 | struct dm_buffer { |
327 | /* protected by the locks in dm_buffer_cache */ |
328 | struct rb_node node; |
329 | |
330 | /* immutable, so don't need protecting */ |
331 | sector_t block; |
332 | void *data; |
333 | unsigned char data_mode; /* DATA_MODE_* */ |
334 | |
335 | /* |
336 | * These two fields are used in isolation, so do not need |
337 | * a surrounding lock. |
338 | */ |
339 | atomic_t hold_count; |
340 | unsigned long last_accessed; |
341 | |
342 | /* |
343 | * Everything else is protected by the mutex in |
344 | * dm_bufio_client |
345 | */ |
346 | unsigned long state; |
347 | struct lru_entry lru; |
348 | unsigned char list_mode; /* LIST_* */ |
349 | blk_status_t read_error; |
350 | blk_status_t write_error; |
351 | unsigned int dirty_start; |
352 | unsigned int dirty_end; |
353 | unsigned int write_start; |
354 | unsigned int write_end; |
355 | struct list_head write_list; |
356 | struct dm_bufio_client *c; |
357 | void (*end_io)(struct dm_buffer *b, blk_status_t bs); |
358 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
359 | #define MAX_STACK 10 |
360 | unsigned int stack_len; |
361 | unsigned long stack_entries[MAX_STACK]; |
362 | #endif |
363 | }; |
364 | |
365 | /*--------------------------------------------------------------*/ |
366 | |
367 | /* |
368 | * The buffer cache manages buffers, particularly: |
369 | * - inc/dec of holder count |
370 | * - setting the last_accessed field |
371 | * - maintains clean/dirty state along with lru |
372 | * - selecting buffers that match predicates |
373 | * |
374 | * It does *not* handle: |
375 | * - allocation/freeing of buffers. |
376 | * - IO |
377 | * - Eviction or cache sizing. |
378 | * |
379 |  * cache_get() and cache_put() are threadsafe; you do not need to |
380 | * protect these calls with a surrounding mutex. All the other |
381 | * methods are not threadsafe; they do use locking primitives, but |
382 | * only enough to ensure get/put are threadsafe. |
383 | */ |
384 | |
385 | struct buffer_tree { |
386 | union { |
387 | struct rw_semaphore lock; |
388 | rwlock_t spinlock; |
389 | } u; |
390 | struct rb_root root; |
391 | } ____cacheline_aligned_in_smp; |
392 | |
393 | struct dm_buffer_cache { |
394 | struct lru lru[LIST_SIZE]; |
395 | /* |
396 | * We spread entries across multiple trees to reduce contention |
397 | * on the locks. |
398 | */ |
399 | unsigned int num_locks; |
400 | bool no_sleep; |
401 | struct buffer_tree trees[]; |
402 | }; |
403 | |
404 | static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled); |
405 | |
406 | static inline unsigned int cache_index(sector_t block, unsigned int num_locks) |
407 | { |
408 | return dm_hash_locks_index(block, num_locks); |
409 | } |
410 | |
411 | static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) |
412 | { |
413 | if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) |
414 | read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); |
415 | else |
416 | 		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); |
417 | } |
418 | |
419 | static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) |
420 | { |
421 | if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) |
422 | read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); |
423 | else |
424 | 		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); |
425 | } |
426 | |
427 | static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) |
428 | { |
429 | if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) |
430 | write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); |
431 | else |
432 | 		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); |
433 | } |
434 | |
435 | static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) |
436 | { |
437 | if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) |
438 | write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); |
439 | else |
440 | 		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); |
441 | } |
442 | |
443 | /* |
444 | * Sometimes we want to repeatedly get and drop locks as part of an iteration. |
445 | * This struct helps avoid redundant drop and gets of the same lock. |
446 | */ |
447 | struct lock_history { |
448 | struct dm_buffer_cache *cache; |
449 | bool write; |
450 | unsigned int previous; |
451 | unsigned int no_previous; |
452 | }; |
453 | |
454 | static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write) |
455 | { |
456 | lh->cache = cache; |
457 | lh->write = write; |
458 | lh->no_previous = cache->num_locks; |
459 | lh->previous = lh->no_previous; |
460 | } |
461 | |
462 | static void __lh_lock(struct lock_history *lh, unsigned int index) |
463 | { |
464 | if (lh->write) { |
465 | if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) |
466 | write_lock_bh(&lh->cache->trees[index].u.spinlock); |
467 | else |
468 | 			down_write(&lh->cache->trees[index].u.lock); |
469 | } else { |
470 | if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) |
471 | read_lock_bh(&lh->cache->trees[index].u.spinlock); |
472 | else |
473 | 			down_read(&lh->cache->trees[index].u.lock); |
474 | } |
475 | } |
476 | |
477 | static void __lh_unlock(struct lock_history *lh, unsigned int index) |
478 | { |
479 | if (lh->write) { |
480 | if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) |
481 | write_unlock_bh(&lh->cache->trees[index].u.spinlock); |
482 | else |
483 | 			up_write(&lh->cache->trees[index].u.lock); |
484 | } else { |
485 | if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) |
486 | read_unlock_bh(&lh->cache->trees[index].u.spinlock); |
487 | else |
488 | 			up_read(&lh->cache->trees[index].u.lock); |
489 | } |
490 | } |
491 | |
492 | /* |
493 | * Make sure you call this since it will unlock the final lock. |
494 | */ |
495 | static void lh_exit(struct lock_history *lh) |
496 | { |
497 | if (lh->previous != lh->no_previous) { |
498 | 		__lh_unlock(lh, lh->previous); |
499 | lh->previous = lh->no_previous; |
500 | } |
501 | } |
502 | |
503 | /* |
504 | * Named 'next' because there is no corresponding |
505 | * 'up/unlock' call since it's done automatically. |
506 | */ |
507 | static void lh_next(struct lock_history *lh, sector_t b) |
508 | { |
509 | 	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */ |
510 | |
511 | if (lh->previous != lh->no_previous) { |
512 | if (lh->previous != index) { |
513 | 			__lh_unlock(lh, lh->previous); |
514 | __lh_lock(lh, index); |
515 | lh->previous = index; |
516 | } |
517 | } else { |
518 | __lh_lock(lh, index); |
519 | lh->previous = index; |
520 | } |
521 | } |
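
/*
 * Illustrative sketch (not compiled into the driver) of the intended
 * lock_history pattern: while visiting buffers in block order, lh_next()
 * only drops and re-takes a tree lock when the walk crosses into a
 * different lock stripe, and lh_exit() releases whatever is still held.
 * demo_walk() and its arguments are hypothetical.
 */
#if 0
static void demo_walk(struct dm_buffer_cache *bc, sector_t *blocks, unsigned int count)
{
	struct lock_history lh;
	unsigned int i;

	lh_init(&lh, bc, false);	/* read locks */
	for (i = 0; i < count; i++) {
		lh_next(&lh, blocks[i]);
		/* ... inspect the buffer that caches blocks[i] here ... */
	}
	lh_exit(&lh);
}
#endif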
522 | |
523 | static inline struct dm_buffer *le_to_buffer(struct lru_entry *le) |
524 | { |
525 | return container_of(le, struct dm_buffer, lru); |
526 | } |
527 | |
528 | static struct dm_buffer *list_to_buffer(struct list_head *l) |
529 | { |
530 | struct lru_entry *le = list_entry(l, struct lru_entry, list); |
531 | |
532 | if (!le) |
533 | return NULL; |
534 | |
535 | return le_to_buffer(le); |
536 | } |
537 | |
538 | static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep) |
539 | { |
540 | unsigned int i; |
541 | |
542 | bc->num_locks = num_locks; |
543 | bc->no_sleep = no_sleep; |
544 | |
545 | for (i = 0; i < bc->num_locks; i++) { |
546 | if (no_sleep) |
547 | rwlock_init(&bc->trees[i].u.spinlock); |
548 | else |
549 | init_rwsem(&bc->trees[i].u.lock); |
550 | bc->trees[i].root = RB_ROOT; |
551 | } |
552 | |
553 | 	lru_init(&bc->lru[LIST_CLEAN]); |
554 | 	lru_init(&bc->lru[LIST_DIRTY]); |
555 | } |
556 | |
557 | static void cache_destroy(struct dm_buffer_cache *bc) |
558 | { |
559 | unsigned int i; |
560 | |
561 | for (i = 0; i < bc->num_locks; i++) |
562 | WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root)); |
563 | |
564 | 	lru_destroy(&bc->lru[LIST_CLEAN]); |
565 | 	lru_destroy(&bc->lru[LIST_DIRTY]); |
566 | } |
567 | |
568 | /*--------------*/ |
569 | |
570 | /* |
571 |  * Not threadsafe, or racy depending on how you look at it. |
572 | */ |
573 | static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode) |
574 | { |
575 | return bc->lru[list_mode].count; |
576 | } |
577 | |
578 | static inline unsigned long cache_total(struct dm_buffer_cache *bc) |
579 | { |
580 | return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY); |
581 | } |
582 | |
583 | /*--------------*/ |
584 | |
585 | /* |
586 | * Gets a specific buffer, indexed by block. |
587 | * If the buffer is found then its holder count will be incremented and |
588 | * lru_reference will be called. |
589 | * |
590 | * threadsafe |
591 | */ |
592 | static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block) |
593 | { |
594 | struct rb_node *n = root->rb_node; |
595 | struct dm_buffer *b; |
596 | |
597 | while (n) { |
598 | b = container_of(n, struct dm_buffer, node); |
599 | |
600 | if (b->block == block) |
601 | return b; |
602 | |
603 | n = block < b->block ? n->rb_left : n->rb_right; |
604 | } |
605 | |
606 | return NULL; |
607 | } |
608 | |
609 | static void __cache_inc_buffer(struct dm_buffer *b) |
610 | { |
611 | 	atomic_inc(&b->hold_count); |
612 | WRITE_ONCE(b->last_accessed, jiffies); |
613 | } |
614 | |
615 | static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block) |
616 | { |
617 | struct dm_buffer *b; |
618 | |
619 | cache_read_lock(bc, block); |
620 | 	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block); |
621 | 	if (b) { |
622 | 		lru_reference(&b->lru); |
623 | __cache_inc_buffer(b); |
624 | } |
625 | cache_read_unlock(bc, block); |
626 | |
627 | return b; |
628 | } |
629 | |
630 | /*--------------*/ |
631 | |
632 | /* |
633 | * Returns true if the hold count hits zero. |
634 | * threadsafe |
635 | */ |
636 | static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b) |
637 | { |
638 | bool r; |
639 | |
640 | 	cache_read_lock(bc, b->block); |
641 | 	BUG_ON(!atomic_read(&b->hold_count)); |
642 | 	r = atomic_dec_and_test(&b->hold_count); |
643 | 	cache_read_unlock(bc, b->block); |
644 | |
645 | return r; |
646 | } |
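
/*
 * Illustrative sketch (not compiled into the driver) of the threadsafe
 * get/put pairing described above: no surrounding mutex is needed for
 * these two calls; everything else on the cache needs the caller's own
 * serialisation. demo_get_put() is hypothetical.
 */
#if 0
static void demo_get_put(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);

	if (!b)
		return;	/* not cached */

	/* ... read b->data while the hold count pins the buffer ... */

	if (cache_put(bc, b)) {
		/* hold count dropped to zero; the buffer is now evictable */
	}
}
#endif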
647 | |
648 | /*--------------*/ |
649 | |
650 | typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *); |
651 | |
652 | /* |
653 | * Evicts a buffer based on a predicate. The oldest buffer that |
654 | * matches the predicate will be selected. In addition to the |
655 | * predicate the hold_count of the selected buffer will be zero. |
656 | */ |
657 | struct evict_wrapper { |
658 | struct lock_history *lh; |
659 | b_predicate pred; |
660 | void *context; |
661 | }; |
662 | |
663 | /* |
664 | * Wraps the buffer predicate turning it into an lru predicate. Adds |
665 | * extra test for hold_count. |
666 | */ |
667 | static enum evict_result __evict_pred(struct lru_entry *le, void *context) |
668 | { |
669 | struct evict_wrapper *w = context; |
670 | struct dm_buffer *b = le_to_buffer(le); |
671 | |
672 | 	lh_next(w->lh, b->block); |
673 | |
674 | 	if (atomic_read(&b->hold_count)) |
675 | return ER_DONT_EVICT; |
676 | |
677 | return w->pred(b, w->context); |
678 | } |
679 | |
680 | static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode, |
681 | b_predicate pred, void *context, |
682 | struct lock_history *lh) |
683 | { |
684 | struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context}; |
685 | struct lru_entry *le; |
686 | struct dm_buffer *b; |
687 | |
688 | 	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep); |
689 | if (!le) |
690 | return NULL; |
691 | |
692 | b = le_to_buffer(le); |
693 | /* __evict_pred will have locked the appropriate tree. */ |
694 | 	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); |
695 | |
696 | return b; |
697 | } |
698 | |
699 | static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode, |
700 | b_predicate pred, void *context) |
701 | { |
702 | struct dm_buffer *b; |
703 | struct lock_history lh; |
704 | |
705 | 	lh_init(&lh, bc, true); |
706 | 	b = __cache_evict(bc, list_mode, pred, context, &lh); |
707 | 	lh_exit(&lh); |
708 | |
709 | return b; |
710 | } |
711 | |
712 | /*--------------*/ |
713 | |
714 | /* |
715 | * Mark a buffer as clean or dirty. Not threadsafe. |
716 | */ |
717 | static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode) |
718 | { |
719 | 	cache_write_lock(bc, b->block); |
720 | 	if (list_mode != b->list_mode) { |
721 | 		lru_remove(&bc->lru[b->list_mode], &b->lru); |
722 | 		b->list_mode = list_mode; |
723 | 		lru_insert(&bc->lru[b->list_mode], &b->lru); |
724 | 	} |
725 | 	cache_write_unlock(bc, b->block); |
726 | } |
727 | |
728 | /*--------------*/ |
729 | |
730 | /* |
731 |  * Runs through the lru associated with 'old_mode'; entries that match the |
732 |  * predicate are moved to 'new_mode'. Not threadsafe. |
733 | */ |
734 | static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, |
735 | b_predicate pred, void *context, struct lock_history *lh) |
736 | { |
737 | struct lru_entry *le; |
738 | struct dm_buffer *b; |
739 | struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context}; |
740 | |
741 | while (true) { |
742 | 		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep); |
743 | if (!le) |
744 | break; |
745 | |
746 | b = le_to_buffer(le); |
747 | b->list_mode = new_mode; |
748 | 		lru_insert(&bc->lru[b->list_mode], &b->lru); |
749 | } |
750 | } |
751 | |
752 | static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode, |
753 | b_predicate pred, void *context) |
754 | { |
755 | struct lock_history lh; |
756 | |
757 | 	lh_init(&lh, bc, true); |
758 | 	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh); |
759 | 	lh_exit(&lh); |
760 | } |
761 | |
762 | /*--------------*/ |
763 | |
764 | /* |
765 | * Iterates through all clean or dirty entries calling a function for each |
766 | * entry. The callback may terminate the iteration early. Not threadsafe. |
767 | */ |
768 | |
769 | /* |
770 | * Iterator functions should return one of these actions to indicate |
771 | * how the iteration should proceed. |
772 | */ |
773 | enum it_action { |
774 | IT_NEXT, |
775 | IT_COMPLETE, |
776 | }; |
777 | |
778 | typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context); |
779 | |
780 | static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode, |
781 | iter_fn fn, void *context, struct lock_history *lh) |
782 | { |
783 | struct lru *lru = &bc->lru[list_mode]; |
784 | struct lru_entry *le, *first; |
785 | |
786 | if (!lru->cursor) |
787 | return; |
788 | |
789 | 	first = le = to_le(lru->cursor); |
790 | do { |
791 | struct dm_buffer *b = le_to_buffer(le); |
792 | |
793 | 		lh_next(lh, b->block); |
794 | |
795 | switch (fn(b, context)) { |
796 | case IT_NEXT: |
797 | break; |
798 | |
799 | case IT_COMPLETE: |
800 | return; |
801 | } |
802 | cond_resched(); |
803 | |
804 | 		le = to_le(le->list.next); |
805 | } while (le != first); |
806 | } |
807 | |
808 | static void cache_iterate(struct dm_buffer_cache *bc, int list_mode, |
809 | iter_fn fn, void *context) |
810 | { |
811 | struct lock_history lh; |
812 | |
813 | 	lh_init(&lh, bc, false); |
814 | 	__cache_iterate(bc, list_mode, fn, context, &lh); |
815 | 	lh_exit(&lh); |
816 | } |
817 | |
818 | /*--------------*/ |
819 | |
820 | /* |
821 | * Passes ownership of the buffer to the cache. Returns false if the |
822 |  * buffer was already present (in which case ownership does not pass), |
823 |  * e.g. due to a race with another thread. |
824 | * |
825 | * Holder count should be 1 on insertion. |
826 | * |
827 | * Not threadsafe. |
828 | */ |
829 | static bool __cache_insert(struct rb_root *root, struct dm_buffer *b) |
830 | { |
831 | struct rb_node **new = &root->rb_node, *parent = NULL; |
832 | struct dm_buffer *found; |
833 | |
834 | while (*new) { |
835 | found = container_of(*new, struct dm_buffer, node); |
836 | |
837 | if (found->block == b->block) |
838 | return false; |
839 | |
840 | parent = *new; |
841 | new = b->block < found->block ? |
842 | &found->node.rb_left : &found->node.rb_right; |
843 | } |
844 | |
845 | 	rb_link_node(&b->node, parent, new); |
846 | rb_insert_color(&b->node, root); |
847 | |
848 | return true; |
849 | } |
850 | |
851 | static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b) |
852 | { |
853 | bool r; |
854 | |
855 | if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE)) |
856 | return false; |
857 | |
858 | 	cache_write_lock(bc, b->block); |
859 | 	BUG_ON(atomic_read(&b->hold_count) != 1); |
860 | 	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b); |
861 | 	if (r) |
862 | 		lru_insert(&bc->lru[b->list_mode], &b->lru); |
863 | 	cache_write_unlock(bc, b->block); |
864 | |
865 | return r; |
866 | } |
867 | |
868 | /*--------------*/ |
869 | |
870 | /* |
871 |  * Removes a buffer from the cache; ownership of the buffer passes back to the caller. |
872 |  * Fails unless the hold_count is one (i.e. the caller holds the only reference). |
873 | * |
874 | * Not threadsafe. |
875 | */ |
876 | static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b) |
877 | { |
878 | bool r; |
879 | |
880 | 	cache_write_lock(bc, b->block); |
881 | |
882 | 	if (atomic_read(&b->hold_count) != 1) { |
883 | r = false; |
884 | } else { |
885 | r = true; |
886 | 		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root); |
887 | 		lru_remove(&bc->lru[b->list_mode], &b->lru); |
888 | } |
889 | |
890 | 	cache_write_unlock(bc, b->block); |
891 | |
892 | return r; |
893 | } |
894 | |
895 | /*--------------*/ |
896 | |
897 | typedef void (*b_release)(struct dm_buffer *); |
898 | |
899 | static struct dm_buffer *__find_next(struct rb_root *root, sector_t block) |
900 | { |
901 | struct rb_node *n = root->rb_node; |
902 | struct dm_buffer *b; |
903 | struct dm_buffer *best = NULL; |
904 | |
905 | while (n) { |
906 | b = container_of(n, struct dm_buffer, node); |
907 | |
908 | if (b->block == block) |
909 | return b; |
910 | |
911 | if (block <= b->block) { |
912 | n = n->rb_left; |
913 | best = b; |
914 | } else { |
915 | n = n->rb_right; |
916 | } |
917 | } |
918 | |
919 | return best; |
920 | } |
921 | |
922 | static void __remove_range(struct dm_buffer_cache *bc, |
923 | struct rb_root *root, |
924 | sector_t begin, sector_t end, |
925 | b_predicate pred, b_release release) |
926 | { |
927 | struct dm_buffer *b; |
928 | |
929 | while (true) { |
930 | cond_resched(); |
931 | |
932 | 		b = __find_next(root, begin); |
933 | if (!b || (b->block >= end)) |
934 | break; |
935 | |
936 | begin = b->block + 1; |
937 | |
938 | 		if (atomic_read(&b->hold_count)) |
939 | continue; |
940 | |
941 | if (pred(b, NULL) == ER_EVICT) { |
942 | rb_erase(&b->node, root); |
943 | 			lru_remove(&bc->lru[b->list_mode], &b->lru); |
944 | release(b); |
945 | } |
946 | } |
947 | } |
948 | |
949 | static void cache_remove_range(struct dm_buffer_cache *bc, |
950 | sector_t begin, sector_t end, |
951 | b_predicate pred, b_release release) |
952 | { |
953 | unsigned int i; |
954 | |
955 | BUG_ON(bc->no_sleep); |
956 | for (i = 0; i < bc->num_locks; i++) { |
957 | 		down_write(&bc->trees[i].u.lock); |
958 | 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release); |
959 | 		up_write(&bc->trees[i].u.lock); |
960 | } |
961 | } |
962 | |
963 | /*----------------------------------------------------------------*/ |
964 | |
965 | /* |
966 | * Linking of buffers: |
967 | * All buffers are linked to buffer_cache with their node field. |
968 | * |
969 | * Clean buffers that are not being written (B_WRITING not set) |
970 |  * are linked to lru[LIST_CLEAN] with their lru field. |
971 |  * |
972 |  * Dirty and clean buffers that are being written are linked to |
973 |  * lru[LIST_DIRTY] with their lru field. When the write |
974 |  * finishes, the buffer cannot be relinked immediately (because we |
975 |  * are in an interrupt context and relinking requires process |
976 |  * context), so some clean-not-writing buffers can be held on |
977 |  * the dirty lru too. They are later moved to the clean lru in |
978 |  * process context. |
979 | */ |
980 | struct dm_bufio_client { |
981 | struct block_device *bdev; |
982 | unsigned int block_size; |
983 | s8 sectors_per_block_bits; |
984 | |
985 | bool no_sleep; |
986 | struct mutex lock; |
987 | spinlock_t spinlock; |
988 | |
989 | int async_write_error; |
990 | |
991 | void (*alloc_callback)(struct dm_buffer *buf); |
992 | void (*write_callback)(struct dm_buffer *buf); |
993 | struct kmem_cache *slab_buffer; |
994 | struct kmem_cache *slab_cache; |
995 | struct dm_io_client *dm_io; |
996 | |
997 | struct list_head reserved_buffers; |
998 | unsigned int need_reserved_buffers; |
999 | |
1000 | unsigned int minimum_buffers; |
1001 | |
1002 | sector_t start; |
1003 | |
1004 | struct shrinker *shrinker; |
1005 | struct work_struct shrink_work; |
1006 | atomic_long_t need_shrink; |
1007 | |
1008 | wait_queue_head_t free_buffer_wait; |
1009 | |
1010 | struct list_head client_list; |
1011 | |
1012 | /* |
1013 | * Used by global_cleanup to sort the clients list. |
1014 | */ |
1015 | unsigned long oldest_buffer; |
1016 | |
1017 | struct dm_buffer_cache cache; /* must be last member */ |
1018 | }; |
1019 | |
1020 | /*----------------------------------------------------------------*/ |
1021 | |
1022 | #define dm_bufio_in_request() (!!current->bio_list) |
1023 | |
1024 | static void dm_bufio_lock(struct dm_bufio_client *c) |
1025 | { |
1026 | if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) |
1027 | 		spin_lock_bh(&c->spinlock); |
1028 | 	else |
1029 | 		mutex_lock_nested(&c->lock, dm_bufio_in_request()); |
1030 | } |
1031 | |
1032 | static void dm_bufio_unlock(struct dm_bufio_client *c) |
1033 | { |
1034 | if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) |
1035 | 		spin_unlock_bh(&c->spinlock); |
1036 | 	else |
1037 | 		mutex_unlock(&c->lock); |
1038 | } |
1039 | |
1040 | /*----------------------------------------------------------------*/ |
1041 | |
1042 | /* |
1043 | * Default cache size: available memory divided by the ratio. |
1044 | */ |
1045 | static unsigned long dm_bufio_default_cache_size; |
1046 | |
1047 | /* |
1048 | * Total cache size set by the user. |
1049 | */ |
1050 | static unsigned long dm_bufio_cache_size; |
1051 | |
1052 | /* |
1053 | * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change |
1054 | * at any time. If it disagrees, the user has changed cache size. |
1055 | */ |
1056 | static unsigned long dm_bufio_cache_size_latch; |
1057 | |
1058 | static DEFINE_SPINLOCK(global_spinlock); |
1059 | |
1060 | /* |
1061 | * Buffers are freed after this timeout |
1062 | */ |
1063 | static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; |
1064 | static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; |
1065 | |
1066 | static unsigned long dm_bufio_peak_allocated; |
1067 | static unsigned long dm_bufio_allocated_kmem_cache; |
1068 | static unsigned long dm_bufio_allocated_get_free_pages; |
1069 | static unsigned long dm_bufio_allocated_vmalloc; |
1070 | static unsigned long dm_bufio_current_allocated; |
1071 | |
1072 | /*----------------------------------------------------------------*/ |
1073 | |
1074 | /* |
1075 | * The current number of clients. |
1076 | */ |
1077 | static int dm_bufio_client_count; |
1078 | |
1079 | /* |
1080 | * The list of all clients. |
1081 | */ |
1082 | static LIST_HEAD(dm_bufio_all_clients); |
1083 | |
1084 | /* |
1085 | * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count |
1086 | */ |
1087 | static DEFINE_MUTEX(dm_bufio_clients_lock); |
1088 | |
1089 | static struct workqueue_struct *dm_bufio_wq; |
1090 | static struct delayed_work dm_bufio_cleanup_old_work; |
1091 | static struct work_struct dm_bufio_replacement_work; |
1092 | |
1093 | |
1094 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
1095 | static void buffer_record_stack(struct dm_buffer *b) |
1096 | { |
1097 | 	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); |
1098 | } |
1099 | #endif |
1100 | |
1101 | /*----------------------------------------------------------------*/ |
1102 | |
1103 | static void adjust_total_allocated(struct dm_buffer *b, bool unlink) |
1104 | { |
1105 | unsigned char data_mode; |
1106 | long diff; |
1107 | |
1108 | static unsigned long * const class_ptr[DATA_MODE_LIMIT] = { |
1109 | &dm_bufio_allocated_kmem_cache, |
1110 | &dm_bufio_allocated_get_free_pages, |
1111 | &dm_bufio_allocated_vmalloc, |
1112 | }; |
1113 | |
1114 | data_mode = b->data_mode; |
1115 | diff = (long)b->c->block_size; |
1116 | if (unlink) |
1117 | diff = -diff; |
1118 | |
1119 | 	spin_lock(&global_spinlock); |
1120 | |
1121 | *class_ptr[data_mode] += diff; |
1122 | |
1123 | dm_bufio_current_allocated += diff; |
1124 | |
1125 | if (dm_bufio_current_allocated > dm_bufio_peak_allocated) |
1126 | dm_bufio_peak_allocated = dm_bufio_current_allocated; |
1127 | |
1128 | if (!unlink) { |
1129 | if (dm_bufio_current_allocated > dm_bufio_cache_size) |
1130 | 			queue_work(dm_bufio_wq, &dm_bufio_replacement_work); |
1131 | } |
1132 | |
1133 | 	spin_unlock(&global_spinlock); |
1134 | } |
1135 | |
1136 | /* |
1137 | * Change the number of clients and recalculate per-client limit. |
1138 | */ |
1139 | static void __cache_size_refresh(void) |
1140 | { |
1141 | if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock))) |
1142 | return; |
1143 | if (WARN_ON(dm_bufio_client_count < 0)) |
1144 | return; |
1145 | |
1146 | dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); |
1147 | |
1148 | /* |
1149 | * Use default if set to 0 and report the actual cache size used. |
1150 | */ |
1151 | if (!dm_bufio_cache_size_latch) { |
1152 | (void)cmpxchg(&dm_bufio_cache_size, 0, |
1153 | dm_bufio_default_cache_size); |
1154 | dm_bufio_cache_size_latch = dm_bufio_default_cache_size; |
1155 | } |
1156 | } |
1157 | |
1158 | /* |
1159 | * Allocating buffer data. |
1160 | * |
1161 | * Small buffers are allocated with kmem_cache, to use space optimally. |
1162 | * |
1163 | * For large buffers, we choose between get_free_pages and vmalloc. |
1164 | * Each has advantages and disadvantages. |
1165 | * |
1166 | * __get_free_pages can randomly fail if the memory is fragmented. |
1167 | * __vmalloc won't randomly fail, but vmalloc space is limited (it may be |
1168 | * as low as 128M) so using it for caching is not appropriate. |
1169 | * |
1170 | * If the allocation may fail we use __get_free_pages. Memory fragmentation |
1171 | * won't have a fatal effect here, but it just causes flushes of some other |
1172 | * buffers and more I/O will be performed. Don't use __get_free_pages if it |
1173 | * always fails (i.e. order > MAX_PAGE_ORDER). |
1174 | * |
1175 | * If the allocation shouldn't fail we use __vmalloc. This is only for the |
1176 | * initial reserve allocation, so there's no risk of wasting all vmalloc |
1177 | * space. |
1178 | */ |
1179 | static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, |
1180 | unsigned char *data_mode) |
1181 | { |
1182 | if (unlikely(c->slab_cache != NULL)) { |
1183 | *data_mode = DATA_MODE_SLAB; |
1184 | 		return kmem_cache_alloc(c->slab_cache, gfp_mask); |
1185 | } |
1186 | |
1187 | if (c->block_size <= KMALLOC_MAX_SIZE && |
1188 | gfp_mask & __GFP_NORETRY) { |
1189 | *data_mode = DATA_MODE_GET_FREE_PAGES; |
1190 | 		return (void *)__get_free_pages(gfp_mask, |
1191 | 				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); |
1192 | } |
1193 | |
1194 | *data_mode = DATA_MODE_VMALLOC; |
1195 | |
1196 | 	return __vmalloc(c->block_size, gfp_mask); |
1197 | } |
1198 | |
1199 | /* |
1200 | * Free buffer's data. |
1201 | */ |
1202 | static void free_buffer_data(struct dm_bufio_client *c, |
1203 | void *data, unsigned char data_mode) |
1204 | { |
1205 | switch (data_mode) { |
1206 | case DATA_MODE_SLAB: |
1207 | 		kmem_cache_free(c->slab_cache, data); |
1208 | break; |
1209 | |
1210 | case DATA_MODE_GET_FREE_PAGES: |
1211 | 		free_pages((unsigned long)data, |
1212 | 			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); |
1213 | break; |
1214 | |
1215 | case DATA_MODE_VMALLOC: |
1216 | 		vfree(data); |
1217 | break; |
1218 | |
1219 | default: |
1220 | 		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d", |
1221 | data_mode); |
1222 | BUG(); |
1223 | } |
1224 | } |
1225 | |
1226 | /* |
1227 | * Allocate buffer and its data. |
1228 | */ |
1229 | static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) |
1230 | { |
1231 | 	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); |
1232 | |
1233 | if (!b) |
1234 | return NULL; |
1235 | |
1236 | b->c = c; |
1237 | |
1238 | 	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); |
1239 | 	if (!b->data) { |
1240 | 		kmem_cache_free(c->slab_buffer, b); |
1241 | return NULL; |
1242 | } |
1243 | 	adjust_total_allocated(b, false); |
1244 | |
1245 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
1246 | b->stack_len = 0; |
1247 | #endif |
1248 | return b; |
1249 | } |
1250 | |
1251 | /* |
1252 | * Free buffer and its data. |
1253 | */ |
1254 | static void free_buffer(struct dm_buffer *b) |
1255 | { |
1256 | struct dm_bufio_client *c = b->c; |
1257 | |
1258 | 	adjust_total_allocated(b, true); |
1259 | 	free_buffer_data(c, b->data, b->data_mode); |
1260 | 	kmem_cache_free(c->slab_buffer, b); |
1261 | } |
1262 | |
1263 | /* |
1264 | *-------------------------------------------------------------------------- |
1265 | * Submit I/O on the buffer. |
1266 | * |
1267 | * Bio interface is faster but it has some problems: |
1268 | * the vector list is limited (increasing this limit increases |
1269 | * memory-consumption per buffer, so it is not viable); |
1270 | * |
1271 | * the memory must be direct-mapped, not vmalloced; |
1272 | * |
1273 | * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and |
1274 | * it is not vmalloced, try using the bio interface. |
1275 | * |
1276 | * If the buffer is big, if it is vmalloced or if the underlying device |
1277 | * rejects the bio because it is too large, use dm-io layer to do the I/O. |
1278 | * The dm-io layer splits the I/O into multiple requests, avoiding the above |
1279 | * shortcomings. |
1280 | *-------------------------------------------------------------------------- |
1281 | */ |
1282 | |
1283 | /* |
1284 | * dm-io completion routine. It just calls b->bio.bi_end_io, pretending |
1285 | * that the request was handled directly with bio interface. |
1286 | */ |
1287 | static void dmio_complete(unsigned long error, void *context) |
1288 | { |
1289 | struct dm_buffer *b = context; |
1290 | |
1291 | b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); |
1292 | } |
1293 | |
1294 | static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector, |
1295 | unsigned int n_sectors, unsigned int offset, |
1296 | unsigned short ioprio) |
1297 | { |
1298 | int r; |
1299 | struct dm_io_request io_req = { |
1300 | .bi_opf = op, |
1301 | .notify.fn = dmio_complete, |
1302 | .notify.context = b, |
1303 | .client = b->c->dm_io, |
1304 | }; |
1305 | struct dm_io_region region = { |
1306 | .bdev = b->c->bdev, |
1307 | .sector = sector, |
1308 | .count = n_sectors, |
1309 | }; |
1310 | |
1311 | if (b->data_mode != DATA_MODE_VMALLOC) { |
1312 | io_req.mem.type = DM_IO_KMEM; |
1313 | io_req.mem.ptr.addr = (char *)b->data + offset; |
1314 | } else { |
1315 | io_req.mem.type = DM_IO_VMA; |
1316 | io_req.mem.ptr.vma = (char *)b->data + offset; |
1317 | } |
1318 | |
1319 | 	r = dm_io(&io_req, 1, &region, NULL, ioprio); |
1320 | 	if (unlikely(r)) |
1321 | 		b->end_io(b, errno_to_blk_status(r)); |
1322 | } |
1323 | |
1324 | static void bio_complete(struct bio *bio) |
1325 | { |
1326 | struct dm_buffer *b = bio->bi_private; |
1327 | blk_status_t status = bio->bi_status; |
1328 | |
1329 | bio_uninit(bio); |
1330 | 	kfree(bio); |
1331 | b->end_io(b, status); |
1332 | } |
1333 | |
1334 | static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, |
1335 | unsigned int n_sectors, unsigned int offset, |
1336 | unsigned short ioprio) |
1337 | { |
1338 | struct bio *bio; |
1339 | char *ptr; |
1340 | unsigned int len; |
1341 | |
1342 | 	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN); |
1343 | if (!bio) { |
1344 | use_dmio(b, op, sector, n_sectors, offset, ioprio); |
1345 | return; |
1346 | } |
1347 | 	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); |
1348 | bio->bi_iter.bi_sector = sector; |
1349 | bio->bi_end_io = bio_complete; |
1350 | bio->bi_private = b; |
1351 | bio->bi_ioprio = ioprio; |
1352 | |
1353 | ptr = (char *)b->data + offset; |
1354 | len = n_sectors << SECTOR_SHIFT; |
1355 | |
1356 | __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr)); |
1357 | |
1358 | submit_bio(bio); |
1359 | } |
1360 | |
1361 | static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block) |
1362 | { |
1363 | sector_t sector; |
1364 | |
1365 | if (likely(c->sectors_per_block_bits >= 0)) |
1366 | sector = block << c->sectors_per_block_bits; |
1367 | else |
1368 | sector = block * (c->block_size >> SECTOR_SHIFT); |
1369 | sector += c->start; |
1370 | |
1371 | return sector; |
1372 | } |
1373 | |
1374 | static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio, |
1375 | void (*end_io)(struct dm_buffer *, blk_status_t)) |
1376 | { |
1377 | unsigned int n_sectors; |
1378 | sector_t sector; |
1379 | unsigned int offset, end; |
1380 | |
1381 | b->end_io = end_io; |
1382 | |
1383 | 	sector = block_to_sector(b->c, b->block); |
1384 | |
1385 | if (op != REQ_OP_WRITE) { |
1386 | n_sectors = b->c->block_size >> SECTOR_SHIFT; |
1387 | offset = 0; |
1388 | } else { |
1389 | if (b->c->write_callback) |
1390 | b->c->write_callback(b); |
1391 | offset = b->write_start; |
1392 | end = b->write_end; |
1393 | offset &= -DM_BUFIO_WRITE_ALIGN; |
1394 | end += DM_BUFIO_WRITE_ALIGN - 1; |
1395 | end &= -DM_BUFIO_WRITE_ALIGN; |
1396 | if (unlikely(end > b->c->block_size)) |
1397 | end = b->c->block_size; |
1398 | |
1399 | sector += offset >> SECTOR_SHIFT; |
1400 | n_sectors = (end - offset) >> SECTOR_SHIFT; |
1401 | } |
1402 | |
1403 | if (b->data_mode != DATA_MODE_VMALLOC) |
1404 | use_bio(b, op, sector, n_sectors, offset, ioprio); |
1405 | else |
1406 | use_dmio(b, op, sector, n_sectors, offset, ioprio); |
1407 | } |
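
/*
 * Worked example of the write-alignment arithmetic above (illustrative
 * numbers only): with a 16 KiB block dirtied in bytes 100..5000,
 * offset = 100 & -4096 = 0 and end = (5000 + 4095) & -4096 = 8192,
 * so 16 sectors (8 KiB) are written instead of all 32 sectors.
 */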
1408 | |
1409 | /* |
1410 | *-------------------------------------------------------------- |
1411 | * Writing dirty buffers |
1412 | *-------------------------------------------------------------- |
1413 | */ |
1414 | |
1415 | /* |
1416 | * The endio routine for write. |
1417 | * |
1418 | * Set the error, clear B_WRITING bit and wake anyone who was waiting on |
1419 | * it. |
1420 | */ |
1421 | static void write_endio(struct dm_buffer *b, blk_status_t status) |
1422 | { |
1423 | b->write_error = status; |
1424 | if (unlikely(status)) { |
1425 | struct dm_bufio_client *c = b->c; |
1426 | |
1427 | (void)cmpxchg(&c->async_write_error, 0, |
1428 | blk_status_to_errno(status)); |
1429 | } |
1430 | |
1431 | BUG_ON(!test_bit(B_WRITING, &b->state)); |
1432 | |
1433 | smp_mb__before_atomic(); |
1434 | 	clear_bit(B_WRITING, &b->state); |
1435 | smp_mb__after_atomic(); |
1436 | |
1437 | 	wake_up_bit(&b->state, B_WRITING); |
1438 | } |
1439 | |
1440 | /* |
1441 | * Initiate a write on a dirty buffer, but don't wait for it. |
1442 | * |
1443 | * - If the buffer is not dirty, exit. |
1444 |  * - If there is a previous write going on, wait for it to finish (we can't |
1445 | * have two writes on the same buffer simultaneously). |
1446 | * - Submit our write and don't wait on it. We set B_WRITING indicating |
1447 | * that there is a write in progress. |
1448 | */ |
1449 | static void __write_dirty_buffer(struct dm_buffer *b, |
1450 | struct list_head *write_list) |
1451 | { |
1452 | if (!test_bit(B_DIRTY, &b->state)) |
1453 | return; |
1454 | |
1455 | 	clear_bit(B_DIRTY, &b->state); |
1456 | 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); |
1457 | |
1458 | b->write_start = b->dirty_start; |
1459 | b->write_end = b->dirty_end; |
1460 | |
1461 | if (!write_list) |
1462 | 		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio); |
1463 | 	else |
1464 | 		list_add_tail(&b->write_list, write_list); |
1465 | } |
1466 | |
1467 | static void __flush_write_list(struct list_head *write_list) |
1468 | { |
1469 | struct blk_plug plug; |
1470 | |
1471 | blk_start_plug(&plug); |
1472 | 	while (!list_empty(write_list)) { |
1473 | 		struct dm_buffer *b = |
1474 | 			list_entry(write_list->next, struct dm_buffer, write_list); |
1475 | 		list_del(&b->write_list); |
1476 | 		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio); |
1477 | cond_resched(); |
1478 | } |
1479 | blk_finish_plug(&plug); |
1480 | } |
1481 | |
1482 | /* |
1483 | * Wait until any activity on the buffer finishes. Possibly write the |
1484 | * buffer if it is dirty. When this function finishes, there is no I/O |
1485 | * running on the buffer and the buffer is not dirty. |
1486 | */ |
1487 | static void __make_buffer_clean(struct dm_buffer *b) |
1488 | { |
1489 | BUG_ON(atomic_read(&b->hold_count)); |
1490 | |
1491 | /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */ |
1492 | if (!smp_load_acquire(&b->state)) /* fast case */ |
1493 | return; |
1494 | |
1495 | 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); |
1496 | 	__write_dirty_buffer(b, NULL); |
1497 | 	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); |
1498 | } |
1499 | |
1500 | static enum evict_result is_clean(struct dm_buffer *b, void *context) |
1501 | { |
1502 | struct dm_bufio_client *c = context; |
1503 | |
1504 | /* These should never happen */ |
1505 | if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state))) |
1506 | return ER_DONT_EVICT; |
1507 | if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state))) |
1508 | return ER_DONT_EVICT; |
1509 | if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN)) |
1510 | return ER_DONT_EVICT; |
1511 | |
1512 | if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && |
1513 | unlikely(test_bit(B_READING, &b->state))) |
1514 | return ER_DONT_EVICT; |
1515 | |
1516 | return ER_EVICT; |
1517 | } |
1518 | |
1519 | static enum evict_result is_dirty(struct dm_buffer *b, void *context) |
1520 | { |
1521 | /* These should never happen */ |
1522 | if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) |
1523 | return ER_DONT_EVICT; |
1524 | if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY)) |
1525 | return ER_DONT_EVICT; |
1526 | |
1527 | return ER_EVICT; |
1528 | } |
1529 | |
1530 | /* |
1531 | * Find some buffer that is not held by anybody, clean it, unlink it and |
1532 | * return it. |
1533 | */ |
1534 | static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) |
1535 | { |
1536 | struct dm_buffer *b; |
1537 | |
1538 | 	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); |
1539 | if (b) { |
1540 | /* this also waits for pending reads */ |
1541 | __make_buffer_clean(b); |
1542 | return b; |
1543 | } |
1544 | |
1545 | if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) |
1546 | return NULL; |
1547 | |
1548 | 	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); |
1549 | if (b) { |
1550 | __make_buffer_clean(b); |
1551 | return b; |
1552 | } |
1553 | |
1554 | return NULL; |
1555 | } |
1556 | |
1557 | /* |
1558 | * Wait until some other threads free some buffer or release hold count on |
1559 | * some buffer. |
1560 | * |
1561 | * This function is entered with c->lock held, drops it and regains it |
1562 | * before exiting. |
1563 | */ |
1564 | static void __wait_for_free_buffer(struct dm_bufio_client *c) |
1565 | { |
1566 | DECLARE_WAITQUEUE(wait, current); |
1567 | |
1568 | 	add_wait_queue(&c->free_buffer_wait, &wait); |
1569 | set_current_state(TASK_UNINTERRUPTIBLE); |
1570 | dm_bufio_unlock(c); |
1571 | |
1572 | /* |
1573 | * It's possible to miss a wake up event since we don't always |
1574 | * hold c->lock when wake_up is called. So we have a timeout here, |
1575 | * just in case. |
1576 | */ |
1577 | 	io_schedule_timeout(5 * HZ); |
1578 | |
1579 | 	remove_wait_queue(&c->free_buffer_wait, &wait); |
1580 | |
1581 | dm_bufio_lock(c); |
1582 | } |
1583 | |
1584 | enum new_flag { |
1585 | NF_FRESH = 0, |
1586 | NF_READ = 1, |
1587 | NF_GET = 2, |
1588 | NF_PREFETCH = 3 |
1589 | }; |
1590 | |
1591 | /* |
1592 | * Allocate a new buffer. If the allocation is not possible, wait until |
1593 | * some other thread frees a buffer. |
1594 | * |
1595 | * May drop the lock and regain it. |
1596 | */ |
1597 | static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) |
1598 | { |
1599 | struct dm_buffer *b; |
1600 | bool tried_noio_alloc = false; |
1601 | |
1602 | /* |
1603 | * dm-bufio is resistant to allocation failures (it just keeps |
1604 | * one buffer reserved in cases all the allocations fail). |
1605 | * So set flags to not try too hard: |
1606 | * GFP_NOWAIT: don't wait; if we need to sleep we'll release our |
1607 | * mutex and wait ourselves. |
1608 | * __GFP_NORETRY: don't retry and rather return failure |
1609 | * __GFP_NOMEMALLOC: don't use emergency reserves |
1610 | * __GFP_NOWARN: don't print a warning in case of failure |
1611 | * |
1612 | * For debugging, if we set the cache size to 1, no new buffers will |
1613 | * be allocated. |
1614 | */ |
1615 | while (1) { |
1616 | if (dm_bufio_cache_size_latch != 1) { |
1617 | b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); |
1618 | if (b) |
1619 | return b; |
1620 | } |
1621 | |
1622 | if (nf == NF_PREFETCH) |
1623 | return NULL; |
1624 | |
1625 | if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) { |
1626 | dm_bufio_unlock(c); |
1627 | b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); |
1628 | dm_bufio_lock(c); |
1629 | if (b) |
1630 | return b; |
1631 | tried_noio_alloc = true; |
1632 | } |
1633 | |
1634 | 		if (!list_empty(&c->reserved_buffers)) { |
1635 | 			b = list_to_buffer(c->reserved_buffers.next); |
1636 | 			list_del(&b->lru.list); |
1637 | c->need_reserved_buffers++; |
1638 | |
1639 | return b; |
1640 | } |
1641 | |
1642 | b = __get_unclaimed_buffer(c); |
1643 | if (b) |
1644 | return b; |
1645 | |
1646 | __wait_for_free_buffer(c); |
1647 | } |
1648 | } |
1649 | |
1650 | static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) |
1651 | { |
1652 | struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); |
1653 | |
1654 | if (!b) |
1655 | return NULL; |
1656 | |
1657 | if (c->alloc_callback) |
1658 | c->alloc_callback(b); |
1659 | |
1660 | return b; |
1661 | } |
1662 | |
1663 | /* |
1664 | * Free a buffer and wake other threads waiting for free buffers. |
1665 | */ |
1666 | static void __free_buffer_wake(struct dm_buffer *b) |
1667 | { |
1668 | struct dm_bufio_client *c = b->c; |
1669 | |
1670 | b->block = -1; |
1671 | if (!c->need_reserved_buffers) |
1672 | free_buffer(b); |
1673 | else { |
1674 | 		list_add(&b->lru.list, &c->reserved_buffers); |
1675 | c->need_reserved_buffers--; |
1676 | } |
1677 | |
1678 | /* |
1679 | * We hold the bufio lock here, so no one can add entries to the |
1680 | * wait queue anyway. |
1681 | */ |
1682 | if (unlikely(waitqueue_active(&c->free_buffer_wait))) |
1683 | wake_up(&c->free_buffer_wait); |
1684 | } |
1685 | |
1686 | static enum evict_result cleaned(struct dm_buffer *b, void *context) |
1687 | { |
1688 | if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) |
1689 | return ER_DONT_EVICT; /* should never happen */ |
1690 | |
1691 | if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) |
1692 | return ER_DONT_EVICT; |
1693 | else |
1694 | return ER_EVICT; |
1695 | } |
1696 | |
1697 | static void __move_clean_buffers(struct dm_bufio_client *c) |
1698 | { |
1699 | 	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); |
1700 | } |
1701 | |
1702 | struct write_context { |
1703 | int no_wait; |
1704 | struct list_head *write_list; |
1705 | }; |
1706 | |
1707 | static enum it_action write_one(struct dm_buffer *b, void *context) |
1708 | { |
1709 | struct write_context *wc = context; |
1710 | |
1711 | if (wc->no_wait && test_bit(B_WRITING, &b->state)) |
1712 | return IT_COMPLETE; |
1713 | |
1714 | 	__write_dirty_buffer(b, wc->write_list); |
1715 | return IT_NEXT; |
1716 | } |
1717 | |
1718 | static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, |
1719 | struct list_head *write_list) |
1720 | { |
1721 | struct write_context wc = {.no_wait = no_wait, .write_list = write_list}; |
1722 | |
1723 | __move_clean_buffers(c); |
1724 | 	cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); |
1725 | } |
1726 | |
1727 | /* |
1728 |  * Check if we're over the watermark. |
1729 |  * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the |
1730 |  * number of clean buffers, start writing dirty buffers back asynchronously. |
1731 | */ |
1732 | static void __check_watermark(struct dm_bufio_client *c, |
1733 | struct list_head *write_list) |
1734 | { |
1735 | 	if (cache_count(&c->cache, LIST_DIRTY) > |
1736 | 	    cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) |
1737 | 		__write_dirty_buffers_async(c, 1, write_list); |
1738 | } |
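
/*
 * For example (illustrative numbers only): with DM_BUFIO_WRITEBACK_RATIO
 * of 3, 100 clean buffers and 301 dirty buffers, 301 > 100 * 3 holds and
 * asynchronous writeback of the dirty list is started.
 */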
1739 | |
1740 | /* |
1741 | *-------------------------------------------------------------- |
1742 | * Getting a buffer |
1743 | *-------------------------------------------------------------- |
1744 | */ |
1745 | |
1746 | static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) |
1747 | { |
1748 | /* |
1749 | 	 * Relying on waitqueue_active() is racy, but we sleep |
1750 | * with schedule_timeout anyway. |
1751 | */ |
1752 | 	if (cache_put(&c->cache, b) && |
1753 | unlikely(waitqueue_active(&c->free_buffer_wait))) |
1754 | wake_up(&c->free_buffer_wait); |
1755 | } |
1756 | |
1757 | /* |
1758 | * This assumes you have already checked the cache to see if the buffer |
1759 | * is already present (it will recheck after dropping the lock for allocation). |
1760 | */ |
1761 | static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, |
1762 | enum new_flag nf, int *need_submit, |
1763 | struct list_head *write_list) |
1764 | { |
1765 | struct dm_buffer *b, *new_b = NULL; |
1766 | |
1767 | *need_submit = 0; |
1768 | |
1769 | /* This can't be called with NF_GET */ |
1770 | if (WARN_ON_ONCE(nf == NF_GET)) |
1771 | return NULL; |
1772 | |
1773 | new_b = __alloc_buffer_wait(c, nf); |
1774 | if (!new_b) |
1775 | return NULL; |
1776 | |
1777 | /* |
1778 | * We've had a period where the mutex was unlocked, so need to |
1779 | * recheck the buffer tree. |
1780 | */ |
	b = cache_get(&c->cache, block);
	if (b) {
		__free_buffer_wake(new_b);
1784 | goto found_buffer; |
1785 | } |
1786 | |
1787 | __check_watermark(c, write_list); |
1788 | |
1789 | b = new_b; |
	atomic_set(&b->hold_count, 1);
1791 | WRITE_ONCE(b->last_accessed, jiffies); |
1792 | b->block = block; |
1793 | b->read_error = 0; |
1794 | b->write_error = 0; |
1795 | b->list_mode = LIST_CLEAN; |
1796 | |
1797 | if (nf == NF_FRESH) |
1798 | b->state = 0; |
1799 | else { |
1800 | b->state = 1 << B_READING; |
1801 | *need_submit = 1; |
1802 | } |
1803 | |
1804 | /* |
1805 | * We mustn't insert into the cache until the B_READING state |
1806 | * is set. Otherwise another thread could get it and use |
1807 | * it before it had been read. |
1808 | */ |
	cache_insert(&c->cache, b);
1810 | |
1811 | return b; |
1812 | |
1813 | found_buffer: |
1814 | if (nf == NF_PREFETCH) { |
1815 | cache_put_and_wake(c, b); |
1816 | return NULL; |
1817 | } |
1818 | |
1819 | /* |
1820 | * Note: it is essential that we don't wait for the buffer to be |
1821 | * read if dm_bufio_get function is used. Both dm_bufio_get and |
1822 | * dm_bufio_prefetch can be used in the driver request routine. |
1823 | * If the user called both dm_bufio_prefetch and dm_bufio_get on |
1824 | * the same buffer, it would deadlock if we waited. |
1825 | */ |
1826 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { |
1827 | cache_put_and_wake(c, b); |
1828 | return NULL; |
1829 | } |
1830 | |
1831 | return b; |
1832 | } |
1833 | |
1834 | /* |
1835 | * The endio routine for reading: set the error, clear the bit and wake up |
1836 | * anyone waiting on the buffer. |
1837 | */ |
1838 | static void read_endio(struct dm_buffer *b, blk_status_t status) |
1839 | { |
1840 | b->read_error = status; |
1841 | |
1842 | BUG_ON(!test_bit(B_READING, &b->state)); |
1843 | |
1844 | smp_mb__before_atomic(); |
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
1849 | } |
1850 | |
1851 | /* |
1852 | * A common routine for dm_bufio_new and dm_bufio_read. Operation of these |
1853 | * functions is similar except that dm_bufio_new doesn't read the |
1854 | * buffer from the disk (assuming that the caller overwrites all the data |
1855 | * and uses dm_bufio_mark_buffer_dirty to write new data back). |
1856 | */ |
1857 | static void *new_read(struct dm_bufio_client *c, sector_t block, |
1858 | enum new_flag nf, struct dm_buffer **bp, |
1859 | unsigned short ioprio) |
1860 | { |
1861 | int need_submit = 0; |
1862 | struct dm_buffer *b; |
1863 | |
1864 | LIST_HEAD(write_list); |
1865 | |
1866 | *bp = NULL; |
1867 | |
1868 | /* |
1869 | * Fast path, hopefully the block is already in the cache. No need |
1870 | * to get the client lock for this. |
1871 | */ |
	b = cache_get(&c->cache, block);
1873 | if (b) { |
1874 | if (nf == NF_PREFETCH) { |
1875 | cache_put_and_wake(c, b); |
1876 | return NULL; |
1877 | } |
1878 | |
1879 | /* |
1880 | * Note: it is essential that we don't wait for the buffer to be |
1881 | * read if dm_bufio_get function is used. Both dm_bufio_get and |
1882 | * dm_bufio_prefetch can be used in the driver request routine. |
1883 | * If the user called both dm_bufio_prefetch and dm_bufio_get on |
1884 | * the same buffer, it would deadlock if we waited. |
1885 | */ |
1886 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { |
1887 | cache_put_and_wake(c, b); |
1888 | return NULL; |
1889 | } |
1890 | } |
1891 | |
1892 | if (!b) { |
1893 | if (nf == NF_GET) |
1894 | return NULL; |
1895 | |
1896 | dm_bufio_lock(c); |
		b = __bufio_new(c, block, nf, &need_submit, &write_list);
1898 | dm_bufio_unlock(c); |
1899 | } |
1900 | |
1901 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
	if (b && (atomic_read(&b->hold_count) == 1))
1903 | buffer_record_stack(b); |
1904 | #endif |
1905 | |
	__flush_write_list(&write_list);
1907 | |
1908 | if (!b) |
1909 | return NULL; |
1910 | |
1911 | if (need_submit) |
		submit_io(b, REQ_OP_READ, ioprio, read_endio);

	if (nf != NF_GET)	/* we already tested this condition above */
		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);
1919 | |
1920 | dm_bufio_release(b); |
1921 | |
1922 | return ERR_PTR(error); |
1923 | } |
1924 | |
1925 | *bp = b; |
1926 | |
1927 | return b->data; |
1928 | } |
1929 | |
1930 | void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, |
1931 | struct dm_buffer **bp) |
1932 | { |
	return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1934 | } |
1935 | EXPORT_SYMBOL_GPL(dm_bufio_get); |
1936 | |
1937 | static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block, |
1938 | struct dm_buffer **bp, unsigned short ioprio) |
1939 | { |
1940 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
		return ERR_PTR(-EINVAL);

	return new_read(c, block, NF_READ, bp, ioprio);
1944 | } |
1945 | |
1946 | void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, |
1947 | struct dm_buffer **bp) |
1948 | { |
1949 | return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT); |
1950 | } |
1951 | EXPORT_SYMBOL_GPL(dm_bufio_read); |
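
/*
 * Illustrative sketch only (not used by dm-bufio itself): how a
 * hypothetical caller might read a single block with dm_bufio_read() and
 * release it again.  The client "c", the block number 0 and the idea that
 * it holds interesting metadata are assumptions made purely for this
 * example.
 */
static int __maybe_unused dm_bufio_example_read_block(struct dm_bufio_client *c)
{
	struct dm_buffer *buf;
	void *data;

	/* Returns the data pointer, or an ERR_PTR() on read error. */
	data = dm_bufio_read(c, 0, &buf);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... inspect up to dm_bufio_get_block_size(c) bytes at "data" ... */

	/* Drop our hold; the block stays cached for later reuse. */
	dm_bufio_release(buf);
	return 0;
}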
1952 | |
1953 | void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, |
1954 | struct dm_buffer **bp, unsigned short ioprio) |
1955 | { |
1956 | return __dm_bufio_read(c, block, bp, ioprio); |
1957 | } |
1958 | EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio); |
1959 | |
1960 | void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, |
1961 | struct dm_buffer **bp) |
1962 | { |
1963 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
		return ERR_PTR(-EINVAL);

	return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1967 | } |
1968 | EXPORT_SYMBOL_GPL(dm_bufio_new); |
1969 | |
1970 | static void __dm_bufio_prefetch(struct dm_bufio_client *c, |
1971 | sector_t block, unsigned int n_blocks, |
1972 | unsigned short ioprio) |
1973 | { |
1974 | struct blk_plug plug; |
1975 | |
1976 | LIST_HEAD(write_list); |
1977 | |
1978 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1979 | return; /* should never happen */ |
1980 | |
1981 | blk_start_plug(&plug); |
1982 | |
1983 | for (; n_blocks--; block++) { |
1984 | int need_submit; |
1985 | struct dm_buffer *b; |
1986 | |
		b = cache_get(&c->cache, block);
1988 | if (b) { |
1989 | /* already in cache */ |
1990 | cache_put_and_wake(c, b); |
1991 | continue; |
1992 | } |
1993 | |
1994 | dm_bufio_lock(c); |
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
2001 | blk_start_plug(&plug); |
2002 | dm_bufio_lock(c); |
2003 | } |
2004 | if (unlikely(b != NULL)) { |
2005 | dm_bufio_unlock(c); |
2006 | |
2007 | if (need_submit) |
				submit_io(b, REQ_OP_READ, ioprio, read_endio);
2009 | dm_bufio_release(b); |
2010 | |
2011 | cond_resched(); |
2012 | |
2013 | if (!n_blocks) |
2014 | goto flush_plug; |
2015 | dm_bufio_lock(c); |
2016 | } |
2017 | dm_bufio_unlock(c); |
2018 | } |
2019 | |
2020 | flush_plug: |
2021 | blk_finish_plug(&plug); |
2022 | } |
2023 | |
2024 | void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) |
2025 | { |
2026 | return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT); |
2027 | } |
2028 | EXPORT_SYMBOL_GPL(dm_bufio_prefetch); |
2029 | |
2030 | void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, |
2031 | unsigned int n_blocks, unsigned short ioprio) |
2032 | { |
2033 | return __dm_bufio_prefetch(c, block, n_blocks, ioprio); |
2034 | } |
2035 | EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio); |
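
/*
 * Illustrative sketch only: the non-blocking pattern described in the
 * comments above.  A hypothetical request routine prefetches a block and
 * later calls dm_bufio_get(), which never waits for the read to complete;
 * it simply returns NULL when the data is not cached yet.  The caller and
 * its "retry later" policy are assumptions made for this example.
 */
static __maybe_unused void *dm_bufio_example_try_get(struct dm_bufio_client *c,
						     sector_t block)
{
	struct dm_buffer *buf;
	void *data;

	/* Kick off the read in the background; this never sleeps on I/O. */
	dm_bufio_prefetch(c, block, 1);

	/* NULL or an error means "not usable yet" - defer, don't spin. */
	data = dm_bufio_get(c, block, &buf);
	if (IS_ERR_OR_NULL(data))
		return NULL;

	/* The caller is now responsible for dm_bufio_release(buf). */
	return data;
}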
2036 | |
2037 | void dm_bufio_release(struct dm_buffer *b) |
2038 | { |
2039 | struct dm_bufio_client *c = b->c; |
2040 | |
2041 | /* |
2042 | * If there were errors on the buffer, and the buffer is not |
2043 | * to be written, free the buffer. There is no point in caching |
	 * an invalid buffer.
2045 | */ |
2046 | if ((b->read_error || b->write_error) && |
2047 | !test_bit_acquire(B_READING, &b->state) && |
2048 | !test_bit(B_WRITING, &b->state) && |
2049 | !test_bit(B_DIRTY, &b->state)) { |
2050 | dm_bufio_lock(c); |
2051 | |
2052 | /* cache remove can fail if there are other holders */ |
		if (cache_remove(&c->cache, b)) {
2054 | __free_buffer_wake(b); |
2055 | dm_bufio_unlock(c); |
2056 | return; |
2057 | } |
2058 | |
2059 | dm_bufio_unlock(c); |
2060 | } |
2061 | |
2062 | cache_put_and_wake(c, b); |
2063 | } |
2064 | EXPORT_SYMBOL_GPL(dm_bufio_release); |
2065 | |
2066 | void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, |
2067 | unsigned int start, unsigned int end) |
2068 | { |
2069 | struct dm_bufio_client *c = b->c; |
2070 | |
2071 | BUG_ON(start >= end); |
2072 | BUG_ON(end > b->c->block_size); |
2073 | |
2074 | dm_bufio_lock(c); |
2075 | |
2076 | BUG_ON(test_bit(B_READING, &b->state)); |
2077 | |
	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		cache_mark(&c->cache, b, LIST_DIRTY);
2082 | } else { |
2083 | if (start < b->dirty_start) |
2084 | b->dirty_start = start; |
2085 | if (end > b->dirty_end) |
2086 | b->dirty_end = end; |
2087 | } |
2088 | |
2089 | dm_bufio_unlock(c); |
2090 | } |
2091 | EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); |
2092 | |
2093 | void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) |
2094 | { |
2095 | dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); |
2096 | } |
2097 | EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); |
2098 | |
2099 | void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) |
2100 | { |
2101 | LIST_HEAD(write_list); |
2102 | |
2103 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2104 | return; /* should never happen */ |
2105 | |
2106 | dm_bufio_lock(c); |
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
2110 | } |
2111 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); |
2112 | |
2113 | /* |
2114 | * For performance, it is essential that the buffers are written asynchronously |
2115 | * and simultaneously (so that the block layer can merge the writes) and then |
2116 | * waited upon. |
2117 | * |
 * Finally, we flush the hardware disk cache.
2119 | */ |
2120 | static bool is_writing(struct lru_entry *e, void *context) |
2121 | { |
	struct dm_buffer *b = le_to_buffer(e);
2123 | |
2124 | return test_bit(B_WRITING, &b->state); |
2125 | } |
2126 | |
2127 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) |
2128 | { |
2129 | int a, f; |
2130 | unsigned long nr_buffers; |
2131 | struct lru_entry *e; |
2132 | struct lru_iter it; |
2133 | |
2134 | LIST_HEAD(write_list); |
2135 | |
2136 | dm_bufio_lock(c); |
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

	nr_buffers = cache_count(&c->cache, LIST_DIRTY);
	lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
	while ((e = lru_iter_next(&it, is_writing, c))) {
		struct dm_buffer *b = le_to_buffer(e);
		__cache_inc_buffer(b);
2147 | |
2148 | BUG_ON(test_bit(B_READING, &b->state)); |
2149 | |
2150 | if (nr_buffers) { |
2151 | nr_buffers--; |
2152 | dm_bufio_unlock(c); |
			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
			dm_bufio_lock(c);
		} else {
			wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
			cache_mark(&c->cache, b, LIST_CLEAN);
2161 | |
2162 | cache_put_and_wake(c, b); |
2163 | |
2164 | cond_resched(); |
2165 | } |
	lru_iter_end(&it);
2167 | |
2168 | wake_up(&c->free_buffer_wait); |
2169 | dm_bufio_unlock(c); |
2170 | |
2171 | a = xchg(&c->async_write_error, 0); |
2172 | f = dm_bufio_issue_flush(c); |
2173 | if (a) |
2174 | return a; |
2175 | |
2176 | return f; |
2177 | } |
2178 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); |
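
/*
 * Illustrative sketch only: the "overwrite and commit" sequence built from
 * the calls above.  dm_bufio_new() skips the read because the whole block
 * is rewritten, and dm_bufio_write_dirty_buffers() then writes all dirty
 * buffers out and flushes the device.  The block number and the zero-fill
 * payload are assumptions made for this example.
 */
static int __maybe_unused dm_bufio_example_overwrite_block(struct dm_bufio_client *c,
							   sector_t block)
{
	struct dm_buffer *buf;
	void *data;

	data = dm_bufio_new(c, block, &buf);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Fill the whole block, then mark it dirty. */
	memset(data, 0, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(buf);
	dm_bufio_release(buf);

	/* Write out every dirty buffer and flush the disk cache. */
	return dm_bufio_write_dirty_buffers(c);
}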
2179 | |
2180 | /* |
2181 | * Use dm-io to send an empty barrier to flush the device. |
2182 | */ |
2183 | int dm_bufio_issue_flush(struct dm_bufio_client *c) |
2184 | { |
2185 | struct dm_io_request io_req = { |
2186 | .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, |
2187 | .mem.type = DM_IO_KMEM, |
2188 | .mem.ptr.addr = NULL, |
2189 | .client = c->dm_io, |
2190 | }; |
2191 | struct dm_io_region io_reg = { |
2192 | .bdev = c->bdev, |
2193 | .sector = 0, |
2194 | .count = 0, |
2195 | }; |
2196 | |
2197 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2198 | return -EINVAL; |
2199 | |
	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2201 | } |
2202 | EXPORT_SYMBOL_GPL(dm_bufio_issue_flush); |
2203 | |
2204 | /* |
2205 | * Use dm-io to send a discard request to flush the device. |
2206 | */ |
2207 | int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) |
2208 | { |
2209 | struct dm_io_request io_req = { |
2210 | .bi_opf = REQ_OP_DISCARD | REQ_SYNC, |
2211 | .mem.type = DM_IO_KMEM, |
2212 | .mem.ptr.addr = NULL, |
2213 | .client = c->dm_io, |
2214 | }; |
2215 | struct dm_io_region io_reg = { |
2216 | .bdev = c->bdev, |
2217 | .sector = block_to_sector(c, block), |
		.count = block_to_sector(c, count),
2219 | }; |
2220 | |
2221 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2222 | return -EINVAL; /* discards are optional */ |
2223 | |
	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2225 | } |
2226 | EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); |
2227 | |
2228 | static bool forget_buffer(struct dm_bufio_client *c, sector_t block) |
2229 | { |
2230 | struct dm_buffer *b; |
2231 | |
	b = cache_get(&c->cache, block);
	if (b) {
		if (likely(!smp_load_acquire(&b->state))) {
			if (cache_remove(&c->cache, b))
2236 | __free_buffer_wake(b); |
2237 | else |
2238 | cache_put_and_wake(c, b); |
2239 | } else { |
2240 | cache_put_and_wake(c, b); |
2241 | } |
2242 | } |
2243 | |
2244 | return b ? true : false; |
2245 | } |
2246 | |
2247 | /* |
2248 | * Free the given buffer. |
2249 | * |
 * This is just a hint; if the buffer is in use or dirty, this function
2251 | * does nothing. |
2252 | */ |
2253 | void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) |
2254 | { |
2255 | dm_bufio_lock(c); |
2256 | forget_buffer(c, block); |
2257 | dm_bufio_unlock(c); |
2258 | } |
2259 | EXPORT_SYMBOL_GPL(dm_bufio_forget); |
2260 | |
2261 | static enum evict_result idle(struct dm_buffer *b, void *context) |
2262 | { |
2263 | return b->state ? ER_DONT_EVICT : ER_EVICT; |
2264 | } |
2265 | |
2266 | void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) |
2267 | { |
2268 | dm_bufio_lock(c); |
	cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2270 | dm_bufio_unlock(c); |
2271 | } |
2272 | EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers); |
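
/*
 * Illustrative sketch only: discarding a block range and then dropping the
 * corresponding cached buffers so stale data cannot be served from the
 * cache afterwards.  Ignoring the discard result is an assumption made for
 * this example (discards are optional).
 */
static void __maybe_unused dm_bufio_example_discard_range(struct dm_bufio_client *c,
							  sector_t block, sector_t n_blocks)
{
	(void)dm_bufio_issue_discard(c, block, n_blocks);
	dm_bufio_forget_buffers(c, block, n_blocks);
}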
2273 | |
2274 | void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n) |
2275 | { |
2276 | c->minimum_buffers = n; |
2277 | } |
2278 | EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers); |
2279 | |
2280 | unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c) |
2281 | { |
2282 | return c->block_size; |
2283 | } |
2284 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); |
2285 | |
2286 | sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) |
2287 | { |
	sector_t s = bdev_nr_sectors(c->bdev);
2289 | |
2290 | if (s >= c->start) |
2291 | s -= c->start; |
2292 | else |
2293 | s = 0; |
2294 | if (likely(c->sectors_per_block_bits >= 0)) |
2295 | s >>= c->sectors_per_block_bits; |
2296 | else |
2297 | sector_div(s, c->block_size >> SECTOR_SHIFT); |
2298 | return s; |
2299 | } |
2300 | EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); |
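
/*
 * Worked example for the conversion above: a 1 GiB device exposes
 * 2097152 512-byte sectors; with c->start == 0 and 4 KiB blocks
 * (sectors_per_block_bits == 3) this reports 2097152 >> 3 == 262144
 * blocks.  The device size is an assumption chosen for the example.
 */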
2301 | |
2302 | struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) |
2303 | { |
2304 | return c->dm_io; |
2305 | } |
2306 | EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); |
2307 | |
2308 | sector_t dm_bufio_get_block_number(struct dm_buffer *b) |
2309 | { |
2310 | return b->block; |
2311 | } |
2312 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_number); |
2313 | |
2314 | void *dm_bufio_get_block_data(struct dm_buffer *b) |
2315 | { |
2316 | return b->data; |
2317 | } |
2318 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_data); |
2319 | |
2320 | void *dm_bufio_get_aux_data(struct dm_buffer *b) |
2321 | { |
2322 | return b + 1; |
2323 | } |
2324 | EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data); |
2325 | |
2326 | struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) |
2327 | { |
2328 | return b->c; |
2329 | } |
2330 | EXPORT_SYMBOL_GPL(dm_bufio_get_client); |
2331 | |
2332 | static enum it_action warn_leak(struct dm_buffer *b, void *context) |
2333 | { |
2334 | bool *warned = context; |
2335 | |
2336 | WARN_ON(!(*warned)); |
2337 | *warned = true; |
	DMERR("leaked buffer %llx, hold count %u, list %d",
2339 | (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); |
2340 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
	stack_trace_print(b->stack_entries, b->stack_len, 1);
	/* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
	atomic_set(&b->hold_count, 0);
2344 | #endif |
2345 | return IT_NEXT; |
2346 | } |
2347 | |
2348 | static void drop_buffers(struct dm_bufio_client *c) |
2349 | { |
2350 | int i; |
2351 | struct dm_buffer *b; |
2352 | |
2353 | if (WARN_ON(dm_bufio_in_request())) |
2354 | return; /* should never happen */ |
2355 | |
2356 | /* |
2357 | * An optimization so that the buffers are not written one-by-one. |
2358 | */ |
2359 | dm_bufio_write_dirty_buffers_async(c); |
2360 | |
2361 | dm_bufio_lock(c); |
2362 | |
2363 | while ((b = __get_unclaimed_buffer(c))) |
2364 | __free_buffer_wake(b); |
2365 | |
2366 | for (i = 0; i < LIST_SIZE; i++) { |
2367 | bool warned = false; |
2368 | |
		cache_iterate(&c->cache, i, warn_leak, &warned);
2370 | } |
2371 | |
2372 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
2373 | while ((b = __get_unclaimed_buffer(c))) |
2374 | __free_buffer_wake(b); |
2375 | #endif |
2376 | |
2377 | for (i = 0; i < LIST_SIZE; i++) |
2378 | WARN_ON(cache_count(&c->cache, i)); |
2379 | |
2380 | dm_bufio_unlock(c); |
2381 | } |
2382 | |
2383 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
2384 | { |
2385 | unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); |
2386 | |
2387 | if (likely(c->sectors_per_block_bits >= 0)) |
2388 | retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; |
2389 | else |
2390 | retain_bytes /= c->block_size; |
2391 | |
2392 | return retain_bytes; |
2393 | } |
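
/*
 * Worked example for the conversion above: with the default retain_bytes
 * of 256 KiB and 4 KiB blocks (sectors_per_block_bits == 3), this yields
 * 262144 >> (3 + 9) == 64 retained buffers.
 */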
2394 | |
2395 | static void __scan(struct dm_bufio_client *c) |
2396 | { |
2397 | int l; |
2398 | struct dm_buffer *b; |
2399 | unsigned long freed = 0; |
2400 | unsigned long retain_target = get_retain_buffers(c); |
	unsigned long count = cache_total(&c->cache);
2402 | |
2403 | for (l = 0; l < LIST_SIZE; l++) { |
2404 | while (true) { |
2405 | if (count - freed <= retain_target) |
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				break;

			b = cache_evict(&c->cache, l,
					l == LIST_CLEAN ? is_clean : is_dirty, c);
			if (!b)
				break;

			__make_buffer_clean(b);
			__free_buffer_wake(b);

			atomic_long_dec(&c->need_shrink);
2419 | freed++; |
2420 | cond_resched(); |
2421 | } |
2422 | } |
2423 | } |
2424 | |
2425 | static void shrink_work(struct work_struct *w) |
2426 | { |
2427 | struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work); |
2428 | |
2429 | dm_bufio_lock(c); |
2430 | __scan(c); |
2431 | dm_bufio_unlock(c); |
2432 | } |
2433 | |
2434 | static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
2435 | { |
2436 | struct dm_bufio_client *c; |
2437 | |
2438 | c = shrink->private_data; |
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);
2441 | |
2442 | return sc->nr_to_scan; |
2443 | } |
2444 | |
2445 | static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
2446 | { |
2447 | struct dm_bufio_client *c = shrink->private_data; |
	unsigned long count = cache_total(&c->cache);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2451 | |
2452 | if (unlikely(count < retain_target)) |
2453 | count = 0; |
2454 | else |
2455 | count -= retain_target; |
2456 | |
2457 | if (unlikely(count < queued_for_cleanup)) |
2458 | count = 0; |
2459 | else |
2460 | count -= queued_for_cleanup; |
2461 | |
2462 | return count; |
2463 | } |
2464 | |
2465 | /* |
2466 | * Create the buffering interface |
2467 | */ |
2468 | struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size, |
2469 | unsigned int reserved_buffers, unsigned int aux_size, |
2470 | void (*alloc_callback)(struct dm_buffer *), |
2471 | void (*write_callback)(struct dm_buffer *), |
2472 | unsigned int flags) |
2473 | { |
2474 | int r; |
2475 | unsigned int num_locks; |
2476 | struct dm_bufio_client *c; |
2477 | char slab_name[27]; |
2478 | |
2479 | if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { |
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
2481 | r = -EINVAL; |
2482 | goto bad_client; |
2483 | } |
2484 | |
2485 | num_locks = dm_num_hash_locks(); |
	c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2487 | if (!c) { |
2488 | r = -ENOMEM; |
2489 | goto bad_client; |
2490 | } |
	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2492 | |
2493 | c->bdev = bdev; |
2494 | c->block_size = block_size; |
	if (is_power_of_2(block_size))
2496 | c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; |
2497 | else |
2498 | c->sectors_per_block_bits = -1; |
2499 | |
2500 | c->alloc_callback = alloc_callback; |
2501 | c->write_callback = write_callback; |
2502 | |
2503 | if (flags & DM_BUFIO_CLIENT_NO_SLEEP) { |
2504 | c->no_sleep = true; |
2505 | static_branch_inc(&no_sleep_enabled); |
2506 | } |
2507 | |
2508 | mutex_init(&c->lock); |
2509 | spin_lock_init(&c->spinlock); |
	INIT_LIST_HEAD(&c->reserved_buffers);
2511 | c->need_reserved_buffers = reserved_buffers; |
2512 | |
2513 | dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); |
2514 | |
2515 | init_waitqueue_head(&c->free_buffer_wait); |
2516 | c->async_write_error = 0; |
2517 | |
2518 | c->dm_io = dm_io_client_create(); |
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
2521 | goto bad_dm_io; |
2522 | } |
2523 | |
2524 | if (block_size <= KMALLOC_MAX_SIZE && |
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);

		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
2531 | if (!c->slab_cache) { |
2532 | r = -ENOMEM; |
2533 | goto bad; |
2534 | } |
2535 | } |
2536 | if (aux_size) |
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
2542 | if (!c->slab_buffer) { |
2543 | r = -ENOMEM; |
2544 | goto bad; |
2545 | } |
2546 | |
2547 | while (c->need_reserved_buffers) { |
2548 | struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); |
2549 | |
2550 | if (!b) { |
2551 | r = -ENOMEM; |
2552 | goto bad; |
2553 | } |
2554 | __free_buffer_wake(b); |
2555 | } |
2556 | |
2557 | INIT_WORK(&c->shrink_work, shrink_work); |
	atomic_long_set(&c->need_shrink, 0);
2559 | |
	c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2561 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); |
2562 | if (!c->shrinker) { |
2563 | r = -ENOMEM; |
2564 | goto bad; |
2565 | } |
2566 | |
2567 | c->shrinker->count_objects = dm_bufio_shrink_count; |
2568 | c->shrinker->scan_objects = dm_bufio_shrink_scan; |
2569 | c->shrinker->seeks = 1; |
2570 | c->shrinker->batch = 0; |
2571 | c->shrinker->private_data = c; |
2572 | |
	shrinker_register(c->shrinker);
2574 | |
2575 | mutex_lock(&dm_bufio_clients_lock); |
2576 | dm_bufio_client_count++; |
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);
2580 | |
2581 | return c; |
2582 | |
2583 | bad: |
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);

		list_del(&b->lru.list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
bad_client:
	return ERR_PTR(r);
2600 | } |
2601 | EXPORT_SYMBOL_GPL(dm_bufio_client_create); |
2602 | |
2603 | /* |
2604 | * Free the buffering interface. |
2605 | * It is required that there are no references on any buffers. |
2606 | */ |
2607 | void dm_bufio_client_destroy(struct dm_bufio_client *c) |
2608 | { |
2609 | unsigned int i; |
2610 | |
2611 | drop_buffers(c); |
2612 | |
	shrinker_free(c->shrinker);
	flush_work(&c->shrink_work);
2615 | |
2616 | mutex_lock(&dm_bufio_clients_lock); |
2617 | |
	list_del(&c->client_list);
2619 | dm_bufio_client_count--; |
2620 | __cache_size_refresh(); |
2621 | |
	mutex_unlock(&dm_bufio_clients_lock);
2623 | |
2624 | WARN_ON(c->need_reserved_buffers); |
2625 | |
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);

		list_del(&b->lru.list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (cache_count(&c->cache, i))
			DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2636 | |
2637 | for (i = 0; i < LIST_SIZE; i++) |
2638 | WARN_ON(cache_count(&c->cache, i)); |
2639 | |
	cache_destroy(&c->cache);
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
2648 | } |
2649 | EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); |
2650 | |
2651 | void dm_bufio_client_reset(struct dm_bufio_client *c) |
2652 | { |
2653 | drop_buffers(c); |
	flush_work(&c->shrink_work);
2655 | } |
2656 | EXPORT_SYMBOL_GPL(dm_bufio_client_reset); |
2657 | |
2658 | void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) |
2659 | { |
2660 | c->start = start; |
2661 | } |
2662 | EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); |
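
/*
 * Illustrative sketch only: how a hypothetical target constructor might
 * create a client for 4 KiB blocks whose block 0 starts at a data offset
 * on the device, and how the matching destructor would tear it down.  The
 * block size, reserve count, aux size and offset are all assumptions made
 * for this example.
 */
static __maybe_unused struct dm_bufio_client *
dm_bufio_example_client_setup(struct block_device *bdev, sector_t data_start)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
	if (IS_ERR(c))
		return c;

	/* Make block 0 correspond to sector "data_start" on the device. */
	dm_bufio_set_sector_offset(c, data_start);
	return c;
}

static void __maybe_unused dm_bufio_example_client_teardown(struct dm_bufio_client *c)
{
	/* All buffers must have been released before this point. */
	dm_bufio_client_destroy(c);
}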
2663 | |
2664 | /*--------------------------------------------------------------*/ |
2665 | |
2666 | static unsigned int get_max_age_hz(void) |
2667 | { |
2668 | unsigned int max_age = READ_ONCE(dm_bufio_max_age); |
2669 | |
2670 | if (max_age > UINT_MAX / HZ) |
2671 | max_age = UINT_MAX / HZ; |
2672 | |
2673 | return max_age * HZ; |
2674 | } |
2675 | |
2676 | static bool older_than(struct dm_buffer *b, unsigned long age_hz) |
2677 | { |
2678 | return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); |
2679 | } |
2680 | |
2681 | struct evict_params { |
2682 | gfp_t gfp; |
2683 | unsigned long age_hz; |
2684 | |
2685 | /* |
	 * This gets updated with the largest last_accessed (i.e. most
2687 | * recently used) of the evicted buffers. It will not be reinitialised |
2688 | * by __evict_many(), so you can use it across multiple invocations. |
2689 | */ |
2690 | unsigned long last_accessed; |
2691 | }; |
2692 | |
2693 | /* |
 * We may not be able to evict this buffer if IO is pending or the client
2695 | * is still using it. |
2696 | * |
2697 | * And if GFP_NOFS is used, we must not do any I/O because we hold |
2698 | * dm_bufio_clients_lock and we would risk deadlock if the I/O gets |
 * rerouted to a different bufio client.
2700 | */ |
2701 | static enum evict_result select_for_evict(struct dm_buffer *b, void *context) |
2702 | { |
2703 | struct evict_params *params = context; |
2704 | |
2705 | if (!(params->gfp & __GFP_FS) || |
2706 | (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { |
2707 | if (test_bit_acquire(B_READING, &b->state) || |
2708 | test_bit(B_WRITING, &b->state) || |
2709 | test_bit(B_DIRTY, &b->state)) |
2710 | return ER_DONT_EVICT; |
2711 | } |
2712 | |
	return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2714 | } |
2715 | |
2716 | static unsigned long __evict_many(struct dm_bufio_client *c, |
2717 | struct evict_params *params, |
2718 | int list_mode, unsigned long max_count) |
2719 | { |
2720 | unsigned long count; |
2721 | unsigned long last_accessed; |
2722 | struct dm_buffer *b; |
2723 | |
2724 | for (count = 0; count < max_count; count++) { |
		b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2726 | if (!b) |
2727 | break; |
2728 | |
2729 | last_accessed = READ_ONCE(b->last_accessed); |
2730 | if (time_after_eq(params->last_accessed, last_accessed)) |
2731 | params->last_accessed = last_accessed; |
2732 | |
2733 | __make_buffer_clean(b); |
2734 | __free_buffer_wake(b); |
2735 | |
2736 | cond_resched(); |
2737 | } |
2738 | |
2739 | return count; |
2740 | } |
2741 | |
2742 | static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) |
2743 | { |
2744 | struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0}; |
2745 | unsigned long retain = get_retain_buffers(c); |
2746 | unsigned long count; |
2747 | LIST_HEAD(write_list); |
2748 | |
2749 | dm_bufio_lock(c); |
2750 | |
	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
2755 | dm_bufio_lock(c); |
2756 | } |
2757 | |
	count = cache_total(&c->cache);
	if (count > retain)
		__evict_many(c, &params, LIST_CLEAN, count - retain);
2761 | |
2762 | dm_bufio_unlock(c); |
2763 | } |
2764 | |
2765 | static void cleanup_old_buffers(void) |
2766 | { |
2767 | unsigned long max_age_hz = get_max_age_hz(); |
2768 | struct dm_bufio_client *c; |
2769 | |
2770 | mutex_lock(&dm_bufio_clients_lock); |
2771 | |
2772 | __cache_size_refresh(); |
2773 | |
2774 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) |
		evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
2778 | } |
2779 | |
2780 | static void work_fn(struct work_struct *w) |
2781 | { |
2782 | cleanup_old_buffers(); |
2783 | |
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2785 | DM_BUFIO_WORK_TIMER_SECS * HZ); |
2786 | } |
2787 | |
2788 | /*--------------------------------------------------------------*/ |
2789 | |
2790 | /* |
2791 | * Global cleanup tries to evict the oldest buffers from across _all_ |
2792 | * the clients. It does this by repeatedly evicting a few buffers from |
2793 | * the client that holds the oldest buffer. It's approximate, but hopefully |
2794 | * good enough. |
2795 | */ |
2796 | static struct dm_bufio_client *__pop_client(void) |
2797 | { |
2798 | struct list_head *h; |
2799 | |
	if (list_empty(&dm_bufio_all_clients))
		return NULL;

	h = dm_bufio_all_clients.next;
	list_del(h);
2805 | return container_of(h, struct dm_bufio_client, client_list); |
2806 | } |
2807 | |
2808 | /* |
2809 | * Inserts the client in the global client list based on its |
2810 | * 'oldest_buffer' field. |
2811 | */ |
2812 | static void __insert_client(struct dm_bufio_client *new_client) |
2813 | { |
2814 | struct dm_bufio_client *c; |
2815 | struct list_head *h = dm_bufio_all_clients.next; |
2816 | |
2817 | while (h != &dm_bufio_all_clients) { |
2818 | c = container_of(h, struct dm_bufio_client, client_list); |
2819 | if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) |
2820 | break; |
2821 | h = h->next; |
2822 | } |
2823 | |
	list_add_tail(&new_client->client_list, h);
2825 | } |
2826 | |
2827 | static unsigned long __evict_a_few(unsigned long nr_buffers) |
2828 | { |
2829 | unsigned long count; |
2830 | struct dm_bufio_client *c; |
2831 | struct evict_params params = { |
2832 | .gfp = GFP_KERNEL, |
2833 | .age_hz = 0, |
2834 | /* set to jiffies in case there are no buffers in this client */ |
2835 | .last_accessed = jiffies |
2836 | }; |
2837 | |
2838 | c = __pop_client(); |
2839 | if (!c) |
2840 | return 0; |
2841 | |
2842 | dm_bufio_lock(c); |
	count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
	dm_bufio_unlock(c);

	if (count)
		c->oldest_buffer = params.last_accessed;
	__insert_client(c);
2849 | |
2850 | return count; |
2851 | } |
2852 | |
2853 | static void check_watermarks(void) |
2854 | { |
2855 | LIST_HEAD(write_list); |
2856 | struct dm_bufio_client *c; |
2857 | |
2858 | mutex_lock(&dm_bufio_clients_lock); |
2859 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) { |
2860 | dm_bufio_lock(c); |
		__check_watermark(c, &write_list);
		dm_bufio_unlock(c);
	}
	mutex_unlock(&dm_bufio_clients_lock);

	__flush_write_list(&write_list);
2867 | } |
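
/*
 * Worked example for the threshold used below: with
 * DM_BUFIO_LOW_WATERMARK_RATIO == 16, global eviction keeps going until
 * dm_bufio_current_allocated drops below 15/16 of dm_bufio_cache_size,
 * e.g. below 93.75 MiB for a 100 MiB cache limit (an assumed limit,
 * chosen only to make the arithmetic concrete).
 */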
2868 | |
2869 | static void evict_old(void) |
2870 | { |
2871 | unsigned long threshold = dm_bufio_cache_size - |
2872 | dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; |
2873 | |
2874 | mutex_lock(&dm_bufio_clients_lock); |
2875 | while (dm_bufio_current_allocated > threshold) { |
		if (!__evict_a_few(64))
			break;
		cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
2881 | } |
2882 | |
2883 | static void do_global_cleanup(struct work_struct *w) |
2884 | { |
2885 | check_watermarks(); |
2886 | evict_old(); |
2887 | } |
2888 | |
2889 | /* |
2890 | *-------------------------------------------------------------- |
2891 | * Module setup |
2892 | *-------------------------------------------------------------- |
2893 | */ |
2894 | |
2895 | /* |
2896 | * This is called only once for the whole dm_bufio module. |
2897 | * It initializes memory limit. |
2898 | */ |
2899 | static int __init dm_bufio_init(void) |
2900 | { |
2901 | __u64 mem; |
2902 | |
2903 | dm_bufio_allocated_kmem_cache = 0; |
2904 | dm_bufio_allocated_get_free_pages = 0; |
2905 | dm_bufio_allocated_vmalloc = 0; |
2906 | dm_bufio_current_allocated = 0; |
2907 | |
2908 | mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), |
2909 | DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; |
2910 | |
2911 | if (mem > ULONG_MAX) |
2912 | mem = ULONG_MAX; |
2913 | |
2914 | #ifdef CONFIG_MMU |
2915 | if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) |
2916 | mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); |
2917 | #endif |
2918 | |
2919 | dm_bufio_default_cache_size = mem; |
2920 | |
2921 | mutex_lock(&dm_bufio_clients_lock); |
2922 | __cache_size_refresh(); |
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2926 | if (!dm_bufio_wq) |
2927 | return -ENOMEM; |
2928 | |
2929 | INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn); |
2930 | INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup); |
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2932 | DM_BUFIO_WORK_TIMER_SECS * HZ); |
2933 | |
2934 | return 0; |
2935 | } |
2936 | |
2937 | /* |
2938 | * This is called once when unloading the dm_bufio module. |
2939 | */ |
2940 | static void __exit dm_bufio_exit(void) |
2941 | { |
2942 | int bug = 0; |
2943 | |
	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);
2946 | |
2947 | if (dm_bufio_client_count) { |
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
2949 | __func__, dm_bufio_client_count); |
2950 | bug = 1; |
2951 | } |
2952 | |
2953 | if (dm_bufio_current_allocated) { |
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2955 | __func__, dm_bufio_current_allocated); |
2956 | bug = 1; |
2957 | } |
2958 | |
2959 | if (dm_bufio_allocated_get_free_pages) { |
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2961 | __func__, dm_bufio_allocated_get_free_pages); |
2962 | bug = 1; |
2963 | } |
2964 | |
2965 | if (dm_bufio_allocated_vmalloc) { |
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2967 | __func__, dm_bufio_allocated_vmalloc); |
2968 | bug = 1; |
2969 | } |
2970 | |
2971 | WARN_ON(bug); /* leaks are not worth crashing the system */ |
2972 | } |
2973 | |
2974 | module_init(dm_bufio_init) |
2975 | module_exit(dm_bufio_exit) |
2976 | |
2977 | module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644); |
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
3004 | |