1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | drbd_bitmap.c |
4 | |
5 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. |
6 | |
7 | Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. |
8 | Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. |
9 | Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. |
10 | |
11 | */ |
12 | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | |
15 | #include <linux/bitmap.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/string.h> |
18 | #include <linux/drbd.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/highmem.h> |
21 | |
22 | #include "drbd_int.h" |
23 | |
24 | |
25 | /* OPAQUE outside this file! |
26 | * interface defined in drbd_int.h |
27 | |
28 | * convention: |
29 | * function name drbd_bm_... => used elsewhere, "public". |
30 | * function name bm_... => internal to implementation, "private". |
31 | */ |
32 | |
33 | |
34 | /* |
35 | * LIMITATIONS: |
36 | * We want to support >= peta byte of backend storage, while for now still using |
37 | * a granularity of one bit per 4KiB of storage. |
38 | * 1 << 50 bytes backend storage (1 PiB) |
39 | * 1 << (50 - 12) bits needed |
40 | * 38 --> we need u64 to index and count bits |
41 | * 1 << (38 - 3) bitmap bytes needed |
42 | * 35 --> we still need u64 to index and count bytes |
43 | * (that's 32 GiB of bitmap for 1 PiB storage) |
44 | * 1 << (35 - 2) 32bit longs needed |
45 | * 33 --> we'd even need u64 to index and count 32bit long words. |
46 | * 1 << (35 - 3) 64bit longs needed |
47 | * 32 --> we could get away with a 32bit unsigned int to index and count |
 * 64bit long words, but I'd rather stay with unsigned long for now.
49 | * We probably should neither count nor point to bytes or long words |
50 | * directly, but either by bitnumber, or by page index and offset. |
51 | * 1 << (35 - 12) |
52 | * 22 --> we need that much 4KiB pages of bitmap. |
53 | * 1 << (22 + 3) --> on a 64bit arch, |
54 | * we need 32 MiB to store the array of page pointers. |
55 | * |
56 | * Because I'm lazy, and because the resulting patch was too large, too ugly |
57 | * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), |
58 | * (1 << 32) bits * 4k storage. |
59 | * |
60 | |
61 | * bitmap storage and IO: |
62 | * Bitmap is stored little endian on disk, and is kept little endian in |
63 | * core memory. Currently we still hold the full bitmap in core as long |
64 | * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage |
65 | * seems excessive. |
66 | * |
67 | * We plan to reduce the amount of in-core bitmap pages by paging them in |
68 | * and out against their on-disk location as necessary, but need to make |
69 | * sure we don't cause too much meta data IO, and must not deadlock in |
70 | * tight memory situations. This needs some more work. |
71 | */ |
72 | |
73 | /* |
74 | * NOTE |
75 | * Access to the *bm_pages is protected by bm_lock. |
76 | * It is safe to read the other members within the lock. |
77 | * |
 * drbd_bm_set_bits is called from bio_endio callbacks;
 * we may be called with irq already disabled,
80 | * so we need spin_lock_irqsave(). |
81 | * And we need the kmap_atomic. |
82 | */ |
83 | struct drbd_bitmap { |
84 | struct page **bm_pages; |
85 | spinlock_t bm_lock; |
86 | |
87 | /* exclusively to be used by __al_write_transaction(), |
 * drbd_bm_mark_for_writeout() and
 * drbd_bm_write_hinted() -> bm_rw() called from there.
90 | */ |
91 | unsigned int n_bitmap_hints; |
92 | unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION]; |
93 | |
94 | /* see LIMITATIONS: above */ |
95 | |
96 | unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ |
97 | unsigned long bm_bits; |
98 | size_t bm_words; |
99 | size_t bm_number_of_pages; |
100 | sector_t bm_dev_capacity; |
101 | struct mutex bm_change; /* serializes resize operations */ |
102 | |
103 | wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ |
104 | |
105 | enum bm_flag bm_flags; |
106 | |
107 | /* debugging aid, in case we are still racy somewhere */ |
108 | char *bm_why; |
109 | struct task_struct *bm_task; |
110 | }; |
111 | |
112 | #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) |
113 | static void __bm_print_lock_info(struct drbd_device *device, const char *func) |
114 | { |
115 | struct drbd_bitmap *b = device->bitmap; |
116 | if (!drbd_ratelimit()) |
117 | return; |
118 | drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n" , |
119 | current->comm, task_pid_nr(current), |
120 | func, b->bm_why ?: "?" , |
121 | b->bm_task->comm, task_pid_nr(b->bm_task)); |
122 | } |
123 | |
124 | void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags) |
125 | { |
126 | struct drbd_bitmap *b = device->bitmap; |
127 | int trylock_failed; |
128 | |
129 | if (!b) { |
130 | drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n" ); |
131 | return; |
132 | } |
133 | |
	trylock_failed = !mutex_trylock(&b->bm_change);
135 | |
136 | if (trylock_failed) { |
137 | drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n" , |
138 | current->comm, task_pid_nr(current), |
139 | why, b->bm_why ?: "?" , |
140 | b->bm_task->comm, task_pid_nr(b->bm_task)); |
141 | mutex_lock(&b->bm_change); |
142 | } |
143 | if (BM_LOCKED_MASK & b->bm_flags) |
144 | drbd_err(device, "FIXME bitmap already locked in bm_lock\n" ); |
145 | b->bm_flags |= flags & BM_LOCKED_MASK; |
146 | |
147 | b->bm_why = why; |
148 | b->bm_task = current; |
149 | } |
150 | |
151 | void drbd_bm_unlock(struct drbd_device *device) |
152 | { |
153 | struct drbd_bitmap *b = device->bitmap; |
154 | if (!b) { |
155 | drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n" ); |
156 | return; |
157 | } |
158 | |
159 | if (!(BM_LOCKED_MASK & device->bitmap->bm_flags)) |
160 | drbd_err(device, "FIXME bitmap not locked in bm_unlock\n" ); |
161 | |
162 | b->bm_flags &= ~BM_LOCKED_MASK; |
163 | b->bm_why = NULL; |
164 | b->bm_task = NULL; |
	mutex_unlock(&b->bm_change);
166 | } |
167 | |
168 | /* we store some "meta" info about our pages in page->private */ |
169 | /* at a granularity of 4k storage per bitmap bit: |
170 | * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks |
171 | * 1<<38 bits, |
172 | * 1<<23 4k bitmap pages. |
173 | * Use 24 bits as page index, covers 2 peta byte storage |
174 | * at a granularity of 4k per bit. |
175 | * Used to report the failed page idx on io error from the endio handlers. |
176 | */ |
177 | #define BM_PAGE_IDX_MASK ((1UL<<24)-1) |
178 | /* this page is currently read in, or written back */ |
179 | #define BM_PAGE_IO_LOCK 31 |
180 | /* if there has been an IO error for this page */ |
181 | #define BM_PAGE_IO_ERROR 30 |
182 | /* this is to be able to intelligently skip disk IO, |
183 | * set if bits have been set since last IO. */ |
184 | #define BM_PAGE_NEED_WRITEOUT 29 |
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
187 | #define BM_PAGE_LAZY_WRITEOUT 28 |
188 | /* pages marked with this "HINT" will be considered for writeout |
189 | * on activity log transactions */ |
190 | #define BM_PAGE_HINT_WRITEOUT 27 |
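
/* Layout of page->private as used here (sketch):
 *   bits  0..23  index of the page within bm_pages (BM_PAGE_IDX_MASK)
 *   bit  27      BM_PAGE_HINT_WRITEOUT
 *   bit  28      BM_PAGE_LAZY_WRITEOUT
 *   bit  29      BM_PAGE_NEED_WRITEOUT
 *   bit  30      BM_PAGE_IO_ERROR
 *   bit  31      BM_PAGE_IO_LOCK
 * bits 24..26 are currently unused.
 */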
191 | |
192 | /* store_page_idx uses non-atomic assignment. It is only used directly after |
193 | * allocating the page. All other bm_set_page_* and bm_clear_page_* need to |
194 | * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap |
195 | * changes) may happen from various contexts, and wait_on_bit/wake_up_bit |
196 | * requires it all to be atomic as well. */ |
197 | static void bm_store_page_idx(struct page *page, unsigned long idx) |
198 | { |
199 | BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); |
	set_page_private(page, idx);
201 | } |
202 | |
203 | static unsigned long bm_page_to_idx(struct page *page) |
204 | { |
205 | return page_private(page) & BM_PAGE_IDX_MASK; |
206 | } |
207 | |
/* As it is very unlikely that the same page is under IO from more than one
209 | * context, we can get away with a bit per page and one wait queue per bitmap. |
210 | */ |
211 | static void bm_page_lock_io(struct drbd_device *device, int page_nr) |
212 | { |
213 | struct drbd_bitmap *b = device->bitmap; |
214 | void *addr = &page_private(b->bm_pages[page_nr]); |
215 | wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); |
216 | } |
217 | |
218 | static void bm_page_unlock_io(struct drbd_device *device, int page_nr) |
219 | { |
220 | struct drbd_bitmap *b = device->bitmap; |
221 | void *addr = &page_private(b->bm_pages[page_nr]); |
222 | clear_bit_unlock(BM_PAGE_IO_LOCK, addr); |
223 | wake_up(&device->bitmap->bm_io_wait); |
224 | } |
225 | |
226 | /* set _before_ submit_io, so it may be reset due to being changed |
227 | * while this page is in flight... will get submitted later again */ |
228 | static void bm_set_page_unchanged(struct page *page) |
229 | { |
230 | /* use cmpxchg? */ |
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
233 | } |
234 | |
235 | static void bm_set_page_need_writeout(struct page *page) |
236 | { |
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
238 | } |
239 | |
240 | void drbd_bm_reset_al_hints(struct drbd_device *device) |
241 | { |
242 | device->bitmap->n_bitmap_hints = 0; |
243 | } |
244 | |
245 | /** |
246 | * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout |
247 | * @device: DRBD device. |
248 | * @page_nr: the bitmap page to mark with the "hint" flag |
249 | * |
250 | * From within an activity log transaction, we mark a few pages with these |
251 | * hints, then call drbd_bm_write_hinted(), which will only write out changed |
252 | * pages which are flagged with this mark. |
253 | */ |
254 | void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr) |
255 | { |
256 | struct drbd_bitmap *b = device->bitmap; |
257 | struct page *page; |
258 | if (page_nr >= device->bitmap->bm_number_of_pages) { |
259 | drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n" , |
260 | page_nr, (int)device->bitmap->bm_number_of_pages); |
261 | return; |
262 | } |
263 | page = device->bitmap->bm_pages[page_nr]; |
264 | BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints)); |
	if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
266 | b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr; |
267 | } |
268 | |
269 | static int bm_test_page_unchanged(struct page *page) |
270 | { |
271 | volatile const unsigned long *addr = &page_private(page); |
272 | return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; |
273 | } |
274 | |
275 | static void bm_set_page_io_err(struct page *page) |
276 | { |
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
278 | } |
279 | |
280 | static void bm_clear_page_io_err(struct page *page) |
281 | { |
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
283 | } |
284 | |
285 | static void bm_set_page_lazy_writeout(struct page *page) |
286 | { |
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
288 | } |
289 | |
290 | static int bm_test_page_lazy_writeout(struct page *page) |
291 | { |
292 | return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); |
293 | } |
294 | |
295 | /* on a 32bit box, this would allow for exactly (2<<38) bits. */ |
296 | static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) |
297 | { |
298 | /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ |
299 | unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); |
300 | BUG_ON(page_nr >= b->bm_number_of_pages); |
301 | return page_nr; |
302 | } |
303 | |
304 | static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) |
305 | { |
306 | /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ |
307 | unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); |
308 | BUG_ON(page_nr >= b->bm_number_of_pages); |
309 | return page_nr; |
310 | } |
311 | |
312 | static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) |
313 | { |
314 | struct page *page = b->bm_pages[idx]; |
315 | return (unsigned long *) kmap_atomic(page); |
316 | } |
317 | |
318 | static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) |
319 | { |
320 | return __bm_map_pidx(b, idx); |
321 | } |
322 | |
323 | static void __bm_unmap(unsigned long *p_addr) |
324 | { |
325 | kunmap_atomic(p_addr); |
326 | }; |
327 | |
328 | static void bm_unmap(unsigned long *p_addr) |
329 | { |
330 | return __bm_unmap(p_addr); |
331 | } |
332 | |
333 | /* long word offset of _bitmap_ sector */ |
334 | #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) |
335 | /* word offset from start of bitmap to word number _in_page_ |
336 | * modulo longs per page |
337 | #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) |
338 | hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) |
339 | so do it explicitly: |
340 | */ |
341 | #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) |
342 | |
343 | /* Long words per page */ |
344 | #define LWPP (PAGE_SIZE/sizeof(long)) |
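
/* Example values, assuming 4 KiB pages and 64bit longs: LWPP = 512, and
 * MLPP(X) == X & 511, the long word offset within its bitmap page. */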
345 | |
346 | /* |
347 | * actually most functions herein should take a struct drbd_bitmap*, not a |
348 | * struct drbd_device*, but for the debug macros I like to have the device around |
 * to be able to report device specific messages.
350 | */ |
351 | |
352 | |
353 | static void bm_free_pages(struct page **pages, unsigned long number) |
354 | { |
355 | unsigned long i; |
356 | if (!pages) |
357 | return; |
358 | |
359 | for (i = 0; i < number; i++) { |
360 | if (!pages[i]) { |
361 | pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n" , |
362 | i, number); |
363 | continue; |
364 | } |
365 | __free_page(pages[i]); |
366 | pages[i] = NULL; |
367 | } |
368 | } |
369 | |
370 | static inline void bm_vk_free(void *ptr) |
371 | { |
	kvfree(ptr);
373 | } |
374 | |
375 | /* |
376 | * "have" and "want" are NUMBER OF PAGES. |
377 | */ |
378 | static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) |
379 | { |
380 | struct page **old_pages = b->bm_pages; |
381 | struct page **new_pages, *page; |
382 | unsigned int i, bytes; |
383 | unsigned long have = b->bm_number_of_pages; |
384 | |
385 | BUG_ON(have == 0 && old_pages != NULL); |
386 | BUG_ON(have != 0 && old_pages == NULL); |
387 | |
388 | if (have == want) |
389 | return old_pages; |
390 | |
391 | /* Trying kmalloc first, falling back to vmalloc. |
392 | * GFP_NOIO, as this is called while drbd IO is "suspended", |
393 | * and during resize or attach on diskless Primary, |
394 | * we must not block on IO to ourselves. |
395 | * Context is receiver thread or dmsetup. */ |
396 | bytes = sizeof(struct page *)*want; |
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
400 | if (!new_pages) |
401 | return NULL; |
402 | } |
403 | |
404 | if (want >= have) { |
405 | for (i = 0; i < have; i++) |
406 | new_pages[i] = old_pages[i]; |
407 | for (; i < want; i++) { |
408 | page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); |
409 | if (!page) { |
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages);
412 | return NULL; |
413 | } |
414 | /* we want to know which page it is |
415 | * from the endio handlers */ |
			bm_store_page_idx(page, i);
417 | new_pages[i] = page; |
418 | } |
419 | } else { |
420 | for (i = 0; i < want; i++) |
421 | new_pages[i] = old_pages[i]; |
422 | /* NOT HERE, we are outside the spinlock! |
423 | bm_free_pages(old_pages + want, have - want); |
424 | */ |
425 | } |
426 | |
427 | return new_pages; |
428 | } |
429 | |
430 | /* |
431 | * allocates the drbd_bitmap and stores it in device->bitmap. |
432 | */ |
433 | int drbd_bm_init(struct drbd_device *device) |
434 | { |
435 | struct drbd_bitmap *b = device->bitmap; |
436 | WARN_ON(b != NULL); |
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
438 | if (!b) |
439 | return -ENOMEM; |
440 | spin_lock_init(&b->bm_lock); |
441 | mutex_init(&b->bm_change); |
442 | init_waitqueue_head(&b->bm_io_wait); |
443 | |
444 | device->bitmap = b; |
445 | |
446 | return 0; |
447 | } |
448 | |
449 | sector_t drbd_bm_capacity(struct drbd_device *device) |
450 | { |
451 | if (!expect(device, device->bitmap)) |
452 | return 0; |
453 | return device->bitmap->bm_dev_capacity; |
454 | } |
455 | |
456 | /* called on driver unload. TODO: call when a device is destroyed. |
457 | */ |
458 | void drbd_bm_cleanup(struct drbd_device *device) |
459 | { |
460 | if (!expect(device, device->bitmap)) |
461 | return; |
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
	bm_vk_free(device->bitmap->bm_pages);
	kfree(device->bitmap);
465 | device->bitmap = NULL; |
466 | } |
467 | |
468 | /* |
469 | * since (b->bm_bits % BITS_PER_LONG) != 0, |
470 | * this masks out the remaining bits. |
471 | * Returns the number of bits cleared. |
472 | */ |
473 | #ifndef BITS_PER_PAGE |
474 | #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) |
475 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) |
476 | #else |
477 | # if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3)) |
478 | # error "ambiguous BITS_PER_PAGE" |
479 | # endif |
480 | #endif |
481 | #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) |
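
/* Example of the surplus masking below, assuming 64bit longs: for
 * bm_bits == 100, tmp == 100 and mask == (1UL << 36) - 1, so the upper 28
 * bits of the long containing bit 99 get cleared (or set, respectively),
 * and bits 100..127 never leak into the set-bit count or onto disk. */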
482 | static int bm_clear_surplus(struct drbd_bitmap *b) |
483 | { |
484 | unsigned long mask; |
485 | unsigned long *p_addr, *bm; |
486 | int tmp; |
487 | int cleared = 0; |
488 | |
489 | /* number of bits modulo bits per page */ |
490 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
491 | /* mask the used bits of the word containing the last bit */ |
492 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; |
493 | /* bitmap is always stored little endian, |
494 | * on disk and in core memory alike */ |
495 | mask = cpu_to_lel(mask); |
496 | |
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
498 | bm = p_addr + (tmp/BITS_PER_LONG); |
499 | if (mask) { |
500 | /* If mask != 0, we are not exactly aligned, so bm now points |
501 | * to the long containing the last bit. |
502 | * If mask == 0, bm already points to the word immediately |
503 | * after the last (long word aligned) bit. */ |
		cleared = hweight_long(*bm & ~mask);
505 | *bm &= mask; |
506 | bm++; |
507 | } |
508 | |
509 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
510 | /* on a 32bit arch, we may need to zero out |
511 | * a padding long to align with a 64bit remote */ |
		cleared += hweight_long(*bm);
513 | *bm = 0; |
514 | } |
515 | bm_unmap(p_addr); |
516 | return cleared; |
517 | } |
518 | |
519 | static void bm_set_surplus(struct drbd_bitmap *b) |
520 | { |
521 | unsigned long mask; |
522 | unsigned long *p_addr, *bm; |
523 | int tmp; |
524 | |
525 | /* number of bits modulo bits per page */ |
526 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
527 | /* mask the used bits of the word containing the last bit */ |
528 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; |
529 | /* bitmap is always stored little endian, |
530 | * on disk and in core memory alike */ |
531 | mask = cpu_to_lel(mask); |
532 | |
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
534 | bm = p_addr + (tmp/BITS_PER_LONG); |
535 | if (mask) { |
536 | /* If mask != 0, we are not exactly aligned, so bm now points |
537 | * to the long containing the last bit. |
538 | * If mask == 0, bm already points to the word immediately |
539 | * after the last (long word aligned) bit. */ |
540 | *bm |= ~mask; |
541 | bm++; |
542 | } |
543 | |
544 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
545 | /* on a 32bit arch, we may need to zero out |
546 | * a padding long to align with a 64bit remote */ |
547 | *bm = ~0UL; |
548 | } |
549 | bm_unmap(p_addr); |
550 | } |
551 | |
552 | /* you better not modify the bitmap while this is running, |
553 | * or its results will be stale */ |
554 | static unsigned long bm_count_bits(struct drbd_bitmap *b) |
555 | { |
556 | unsigned long *p_addr; |
557 | unsigned long bits = 0; |
558 | unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; |
559 | int idx, last_word; |
560 | |
561 | /* all but last page */ |
562 | for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { |
563 | p_addr = __bm_map_pidx(b, idx); |
		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
565 | __bm_unmap(p_addr); |
566 | cond_resched(); |
567 | } |
568 | /* last (or only) page */ |
569 | last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; |
570 | p_addr = __bm_map_pidx(b, idx); |
	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
574 | /* 32bit arch, may have an unused padding long */ |
575 | if (BITS_PER_LONG == 32 && (last_word & 1) == 0) |
576 | p_addr[last_word+1] = 0; |
577 | __bm_unmap(p_addr); |
578 | return bits; |
579 | } |
580 | |
581 | /* offset and len in long words.*/ |
582 | static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) |
583 | { |
584 | unsigned long *p_addr, *bm; |
585 | unsigned int idx; |
586 | size_t do_now, end; |
587 | |
588 | end = offset + len; |
589 | |
590 | if (end > b->bm_words) { |
591 | pr_alert("bm_memset end > bm_words\n" ); |
592 | return; |
593 | } |
594 | |
595 | while (offset < end) { |
596 | do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; |
		idx = bm_word_to_page_idx(b, offset);
598 | p_addr = bm_map_pidx(b, idx); |
599 | bm = p_addr + MLPP(offset); |
600 | if (bm+do_now > p_addr + LWPP) { |
601 | pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n" , |
602 | p_addr, bm, (int)do_now); |
603 | } else |
604 | memset(bm, c, do_now * sizeof(long)); |
605 | bm_unmap(p_addr); |
		bm_set_page_need_writeout(b->bm_pages[idx]);
607 | offset += do_now; |
608 | } |
609 | } |
610 | |
611 | /* For the layout, see comment above drbd_md_set_sector_offsets(). */ |
612 | static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev) |
613 | { |
614 | u64 bitmap_sectors; |
615 | if (ldev->md.al_offset == 8) |
616 | bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset; |
617 | else |
618 | bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset; |
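	/* 512 byte sectors to bits: each bitmap sector holds 512 * 8 bits,
	 * hence the shift by (9 + 3) below. */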
619 | return bitmap_sectors << (9 + 3); |
620 | } |
621 | |
622 | /* |
623 | * make sure the bitmap has enough room for the attached storage, |
624 | * if necessary, resize. |
625 | * called whenever we may have changed the device size. |
626 | * returns -ENOMEM if we could not allocate enough memory, 0 on success. |
627 | * In case this is actually a resize, we copy the old bitmap into the new one. |
628 | * Otherwise, the bitmap is initialized to all bits set. |
629 | */ |
630 | int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits) |
631 | { |
632 | struct drbd_bitmap *b = device->bitmap; |
633 | unsigned long bits, words, owords, obits; |
634 | unsigned long want, have, onpages; /* number of pages */ |
635 | struct page **npages, **opages = NULL; |
636 | int err = 0; |
637 | bool growing; |
638 | |
639 | if (!expect(device, b)) |
640 | return -ENOMEM; |
641 | |
642 | drbd_bm_lock(device, why: "resize" , flags: BM_LOCKED_MASK); |
643 | |
644 | drbd_info(device, "drbd_bm_resize called with capacity == %llu\n" , |
645 | (unsigned long long)capacity); |
646 | |
647 | if (capacity == b->bm_dev_capacity) |
648 | goto out; |
649 | |
650 | if (capacity == 0) { |
		spin_lock_irq(&b->bm_lock);
652 | opages = b->bm_pages; |
653 | onpages = b->bm_number_of_pages; |
654 | owords = b->bm_words; |
655 | b->bm_pages = NULL; |
656 | b->bm_number_of_pages = |
657 | b->bm_set = |
658 | b->bm_bits = |
659 | b->bm_words = |
660 | b->bm_dev_capacity = 0; |
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages);
664 | goto out; |
665 | } |
666 | bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT)); |
667 | |
668 | /* if we would use |
669 | words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL; |
670 | a 32bit host could present the wrong number of words |
671 | to a 64bit host. |
672 | */ |
673 | words = ALIGN(bits, 64) >> LN2_BPL; |
674 | |
675 | if (get_ldev(device)) { |
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
677 | put_ldev(device); |
678 | if (bits > bits_on_disk) { |
679 | drbd_info(device, "bits = %lu\n" , bits); |
680 | drbd_info(device, "bits_on_disk = %llu\n" , bits_on_disk); |
681 | err = -ENOSPC; |
682 | goto out; |
683 | } |
684 | } |
685 | |
686 | want = PFN_UP(words*sizeof(long)); |
687 | have = b->bm_number_of_pages; |
688 | if (want == have) { |
689 | D_ASSERT(device, b->bm_pages != NULL); |
690 | npages = b->bm_pages; |
691 | } else { |
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
693 | npages = NULL; |
694 | else |
695 | npages = bm_realloc_pages(b, want); |
696 | } |
697 | |
698 | if (!npages) { |
699 | err = -ENOMEM; |
700 | goto out; |
701 | } |
702 | |
	spin_lock_irq(&b->bm_lock);
704 | opages = b->bm_pages; |
705 | owords = b->bm_words; |
706 | obits = b->bm_bits; |
707 | |
708 | growing = bits > obits; |
709 | if (opages && growing && set_new_bits) |
710 | bm_set_surplus(b); |
711 | |
712 | b->bm_pages = npages; |
713 | b->bm_number_of_pages = want; |
714 | b->bm_bits = bits; |
715 | b->bm_words = words; |
716 | b->bm_dev_capacity = capacity; |
717 | |
718 | if (growing) { |
719 | if (set_new_bits) { |
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
724 | |
725 | } |
726 | |
727 | if (want < have) { |
728 | /* implicit: (opages != NULL) && (opages != npages) */ |
		bm_free_pages(opages + want, have - want);
730 | } |
731 | |
732 | (void)bm_clear_surplus(b); |
733 | |
	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages);
737 | if (!growing) |
738 | b->bm_set = bm_count_bits(b); |
739 | drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n" , bits, words, want); |
740 | |
741 | out: |
742 | drbd_bm_unlock(device); |
743 | return err; |
744 | } |
745 | |
746 | /* inherently racy: |
747 | * if not protected by other means, return value may be out of date when |
748 | * leaving this function... |
749 | * we still need to lock it, since it is important that this returns |
750 | * bm_set == 0 precisely. |
751 | * |
752 | * maybe bm_set should be atomic_t ? |
753 | */ |
754 | unsigned long _drbd_bm_total_weight(struct drbd_device *device) |
755 | { |
756 | struct drbd_bitmap *b = device->bitmap; |
757 | unsigned long s; |
758 | unsigned long flags; |
759 | |
760 | if (!expect(device, b)) |
761 | return 0; |
762 | if (!expect(device, b->bm_pages)) |
763 | return 0; |
764 | |
765 | spin_lock_irqsave(&b->bm_lock, flags); |
766 | s = b->bm_set; |
	spin_unlock_irqrestore(&b->bm_lock, flags);
768 | |
769 | return s; |
770 | } |
771 | |
772 | unsigned long drbd_bm_total_weight(struct drbd_device *device) |
773 | { |
774 | unsigned long s; |
775 | /* if I don't have a disk, I don't know about out-of-sync status */ |
776 | if (!get_ldev_if_state(device, D_NEGOTIATING)) |
777 | return 0; |
778 | s = _drbd_bm_total_weight(device); |
779 | put_ldev(device); |
780 | return s; |
781 | } |
782 | |
783 | size_t drbd_bm_words(struct drbd_device *device) |
784 | { |
785 | struct drbd_bitmap *b = device->bitmap; |
786 | if (!expect(device, b)) |
787 | return 0; |
788 | if (!expect(device, b->bm_pages)) |
789 | return 0; |
790 | |
791 | return b->bm_words; |
792 | } |
793 | |
794 | unsigned long drbd_bm_bits(struct drbd_device *device) |
795 | { |
796 | struct drbd_bitmap *b = device->bitmap; |
797 | if (!expect(device, b)) |
798 | return 0; |
799 | |
800 | return b->bm_bits; |
801 | } |
802 | |
803 | /* merge number words from buffer into the bitmap starting at offset. |
804 | * buffer[i] is expected to be little endian unsigned long. |
805 | * bitmap must be locked by drbd_bm_lock. |
806 | * currently only used from receive_bitmap. |
807 | */ |
808 | void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number, |
809 | unsigned long *buffer) |
810 | { |
811 | struct drbd_bitmap *b = device->bitmap; |
812 | unsigned long *p_addr, *bm; |
813 | unsigned long word, bits; |
814 | unsigned int idx; |
815 | size_t end, do_now; |
816 | |
817 | end = offset + number; |
818 | |
819 | if (!expect(device, b)) |
820 | return; |
821 | if (!expect(device, b->bm_pages)) |
822 | return; |
823 | if (number == 0) |
824 | return; |
825 | WARN_ON(offset >= b->bm_words); |
826 | WARN_ON(end > b->bm_words); |
827 | |
	spin_lock_irq(&b->bm_lock);
829 | while (offset < end) { |
830 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
		idx = bm_word_to_page_idx(b, offset);
832 | p_addr = bm_map_pidx(b, idx); |
833 | bm = p_addr + MLPP(offset); |
834 | offset += do_now; |
835 | while (do_now--) { |
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
840 | } |
841 | bm_unmap(p_addr); |
		bm_set_page_need_writeout(b->bm_pages[idx]);
843 | } |
844 | /* with 32bit <-> 64bit cross-platform connect |
845 | * this is only correct for current usage, |
846 | * where we _know_ that we are 64 bit aligned, |
847 | * and know that this function is used in this way, too... |
848 | */ |
849 | if (end == b->bm_words) |
850 | b->bm_set -= bm_clear_surplus(b); |
	spin_unlock_irq(&b->bm_lock);
852 | } |
853 | |
854 | /* copy number words from the bitmap starting at offset into the buffer. |
855 | * buffer[i] will be little endian unsigned long. |
856 | */ |
857 | void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number, |
858 | unsigned long *buffer) |
859 | { |
860 | struct drbd_bitmap *b = device->bitmap; |
861 | unsigned long *p_addr, *bm; |
862 | size_t end, do_now; |
863 | |
864 | end = offset + number; |
865 | |
866 | if (!expect(device, b)) |
867 | return; |
868 | if (!expect(device, b->bm_pages)) |
869 | return; |
870 | |
	spin_lock_irq(&b->bm_lock);
872 | if ((offset >= b->bm_words) || |
873 | (end > b->bm_words) || |
874 | (number <= 0)) |
875 | drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n" , |
876 | (unsigned long) offset, |
877 | (unsigned long) number, |
878 | (unsigned long) b->bm_words); |
879 | else { |
880 | while (offset < end) { |
881 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
883 | bm = p_addr + MLPP(offset); |
884 | offset += do_now; |
885 | while (do_now--) |
886 | *buffer++ = *bm++; |
887 | bm_unmap(p_addr); |
888 | } |
889 | } |
	spin_unlock_irq(&b->bm_lock);
891 | } |
892 | |
893 | /* set all bits in the bitmap */ |
894 | void drbd_bm_set_all(struct drbd_device *device) |
895 | { |
896 | struct drbd_bitmap *b = device->bitmap; |
897 | if (!expect(device, b)) |
898 | return; |
899 | if (!expect(device, b->bm_pages)) |
900 | return; |
901 | |
	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
907 | } |
908 | |
909 | /* clear all bits in the bitmap */ |
910 | void drbd_bm_clear_all(struct drbd_device *device) |
911 | { |
912 | struct drbd_bitmap *b = device->bitmap; |
913 | if (!expect(device, b)) |
914 | return; |
915 | if (!expect(device, b->bm_pages)) |
916 | return; |
917 | |
	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
922 | } |
923 | |
924 | static void drbd_bm_aio_ctx_destroy(struct kref *kref) |
925 | { |
926 | struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref); |
927 | unsigned long flags; |
928 | |
929 | spin_lock_irqsave(&ctx->device->resource->req_lock, flags); |
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
934 | } |
935 | |
936 | /* bv_page may be a copy, or may be the original */ |
937 | static void drbd_bm_endio(struct bio *bio) |
938 | { |
939 | struct drbd_bm_aio_ctx *ctx = bio->bi_private; |
940 | struct drbd_device *device = ctx->device; |
941 | struct drbd_bitmap *b = device->bitmap; |
	unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
943 | |
944 | if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && |
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
947 | |
948 | if (bio->bi_status) { |
949 | /* ctx error will hold the completed-last non-zero error code, |
950 | * in case error codes differ. */ |
		ctx->error = blk_status_to_errno(bio->bi_status);
		bm_set_page_io_err(b->bm_pages[idx]);
953 | /* Not identical to on disk version of it. |
954 | * Is BM_PAGE_IO_ERROR enough? */ |
955 | if (drbd_ratelimit()) |
956 | drbd_err(device, "IO ERROR %d on bitmap page idx %u\n" , |
957 | bio->bi_status, idx); |
958 | } else { |
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
961 | } |
962 | |
	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
974 | } |
975 | } |
976 | |
977 | /* For the layout, see comment above drbd_md_set_sector_offsets(). */ |
978 | static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev) |
979 | { |
980 | switch (bdev->md.meta_dev_idx) { |
981 | case DRBD_MD_INDEX_INTERNAL: |
982 | case DRBD_MD_INDEX_FLEX_INT: |
983 | return bdev->md.md_offset + bdev->md.al_offset -1; |
984 | case DRBD_MD_INDEX_FLEX_EXT: |
985 | default: |
986 | return bdev->md.md_offset + bdev->md.md_size_sect -1; |
987 | } |
988 | } |
989 | |
990 | static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) |
991 | { |
992 | struct drbd_device *device = ctx->device; |
993 | enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE; |
994 | struct drbd_bitmap *b = device->bitmap; |
995 | struct bio *bio; |
996 | struct page *page; |
997 | sector_t last_bm_sect; |
998 | sector_t first_bm_sect; |
999 | sector_t on_disk_sector; |
1000 | unsigned int len; |
1001 | |
1002 | first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset; |
1003 | on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT)); |
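	/* Each bitmap page maps to PAGE_SIZE/512 on-disk sectors: with 4 KiB
	 * pages, page_nr is scaled by 8 sectors in the line above. */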
1004 | |
1005 | /* this might happen with very small |
1006 | * flexible external meta data device, |
1007 | * or with PAGE_SIZE > 4k */ |
	last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
1009 | if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) { |
1010 | sector_t len_sect = last_bm_sect - on_disk_sector + 1; |
1011 | if (len_sect < PAGE_SIZE/SECTOR_SIZE) |
1012 | len = (unsigned int)len_sect*SECTOR_SIZE; |
1013 | else |
1014 | len = PAGE_SIZE; |
1015 | } else { |
1016 | if (drbd_ratelimit()) { |
1017 | drbd_err(device, "Invalid offset during on-disk bitmap access: " |
1018 | "page idx %u, sector %llu\n" , page_nr, on_disk_sector); |
1019 | } |
1020 | ctx->error = -EIO; |
		bm_set_page_io_err(b->bm_pages[page_nr]);
		if (atomic_dec_and_test(&ctx->in_flight)) {
			ctx->done = 1;
			wake_up(&device->misc_wait);
			kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1026 | } |
1027 | return; |
1028 | } |
1029 | |
1030 | /* serialize IO on this page */ |
1031 | bm_page_lock_io(device, page_nr); |
1032 | /* before memcpy and submit, |
1033 | * so it can be redirtied any time */ |
	bm_set_page_unchanged(b->bm_pages[page_nr]);
1035 | |
1036 | if (ctx->flags & BM_AIO_COPY_PAGES) { |
		page = mempool_alloc(&drbd_md_io_page_pool,
				GFP_NOIO | __GFP_HIGHMEM);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
1041 | } else |
1042 | page = b->bm_pages[page_nr]; |
	bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
			&drbd_md_io_bio_set);
1045 | bio->bi_iter.bi_sector = on_disk_sector; |
	__bio_add_page(bio, page, len, 0);
1047 | bio->bi_private = ctx; |
1048 | bio->bi_end_io = drbd_bm_endio; |
1049 | |
	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
1051 | bio_io_error(bio); |
1052 | } else { |
1053 | submit_bio(bio); |
1054 | /* this should not count as user activity and cause the |
1055 | * resync to throttle -- see drbd_rs_should_slow_down(). */ |
		atomic_add(len >> 9, &device->rs_sect_ev);
1057 | } |
1058 | } |
1059 | |
1060 | /* |
1061 | * bm_rw: read/write the whole bitmap from/to its on disk location. |
1062 | */ |
1063 | static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local) |
1064 | { |
1065 | struct drbd_bm_aio_ctx *ctx; |
1066 | struct drbd_bitmap *b = device->bitmap; |
1067 | unsigned int num_pages, i, count = 0; |
1068 | unsigned long now; |
1069 | char ppb[10]; |
1070 | int err = 0; |
1071 | |
1072 | /* |
1073 | * We are protected against bitmap disappearing/resizing by holding an |
1074 | * ldev reference (caller must have called get_ldev()). |
1075 | * For read/write, we are protected against changes to the bitmap by |
1076 | * the bitmap lock (see drbd_bitmap_io). |
1077 | * For lazy writeout, we don't care for ongoing changes to the bitmap, |
1078 | * as we submit copies of pages anyways. |
1079 | */ |
1080 | |
	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
1082 | if (!ctx) |
1083 | return -ENOMEM; |
1084 | |
1085 | *ctx = (struct drbd_bm_aio_ctx) { |
1086 | .device = device, |
1087 | .start_jif = jiffies, |
1088 | .in_flight = ATOMIC_INIT(1), |
1089 | .done = 0, |
1090 | .flags = flags, |
1091 | .error = 0, |
1092 | .kref = KREF_INIT(2), |
1093 | }; |
1094 | |
1095 | if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */ |
1096 | drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n" ); |
1097 | kfree(objp: ctx); |
1098 | return -ENODEV; |
1099 | } |
1100 | /* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from |
1101 | drbd_adm_attach(), after device->ldev was assigned. */ |
1102 | |
1103 | if (0 == (ctx->flags & ~BM_AIO_READ)) |
1104 | WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); |
1105 | |
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&ctx->list, &device->pending_bitmap_io);
	spin_unlock_irq(&device->resource->req_lock);
1109 | |
1110 | num_pages = b->bm_number_of_pages; |
1111 | |
1112 | now = jiffies; |
1113 | |
1114 | /* let the layers below us try to merge these bios... */ |
1115 | |
1116 | if (flags & BM_AIO_READ) { |
1117 | for (i = 0; i < num_pages; i++) { |
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
1120 | ++count; |
1121 | cond_resched(); |
1122 | } |
1123 | } else if (flags & BM_AIO_WRITE_HINTED) { |
1124 | /* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */ |
1125 | unsigned int hint; |
1126 | for (hint = 0; hint < b->n_bitmap_hints; hint++) { |
1127 | i = b->al_bitmap_hints[hint]; |
1128 | if (i >= num_pages) /* == -1U: no hint here. */ |
1129 | continue; |
1130 | /* Several AL-extents may point to the same page. */ |
1131 | if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT, |
					&page_private(b->bm_pages[i])))
1133 | continue; |
1134 | /* Has it even changed? */ |
			if (bm_test_page_unchanged(b->bm_pages[i]))
1136 | continue; |
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
1139 | ++count; |
1140 | } |
1141 | } else { |
1142 | for (i = 0; i < num_pages; i++) { |
1143 | /* ignore completely unchanged pages */ |
1144 | if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) |
1145 | break; |
1146 | if (!(flags & BM_AIO_WRITE_ALL_PAGES) && |
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
1149 | continue; |
1150 | } |
1151 | /* during lazy writeout, |
1152 | * ignore those pages not marked for lazy writeout. */ |
1153 | if (lazy_writeout_upper_idx && |
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
1156 | continue; |
1157 | } |
			atomic_inc(&ctx->in_flight);
			bm_page_io_async(ctx, i);
1160 | ++count; |
1161 | cond_resched(); |
1162 | } |
1163 | } |
1164 | |
1165 | /* |
1166 | * We initialize ctx->in_flight to one to make sure drbd_bm_endio |
1167 | * will not set ctx->done early, and decrement / test it here. If there |
1168 | * are still some bios in flight, we need to wait for them here. |
1169 | * If all IO is done already (or nothing had been submitted), there is |
1170 | * no need to wait. Still, we need to put the kref associated with the |
1171 | * "in_flight reached zero, all done" event. |
1172 | */ |
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1177 | |
1178 | /* summary for global bitmap IO */ |
1179 | if (flags == 0) { |
		unsigned int ms = jiffies_to_msecs(jiffies - now);
1181 | if (ms > 5) { |
1182 | drbd_info(device, "bitmap %s of %u pages took %u ms\n" , |
1183 | (flags & BM_AIO_READ) ? "READ" : "WRITE" , |
1184 | count, ms); |
1185 | } |
1186 | } |
1187 | |
1188 | if (ctx->error) { |
1189 | drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n" ); |
1190 | drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); |
1191 | err = -EIO; /* ctx->error ? */ |
1192 | } |
1193 | |
	if (atomic_read(&ctx->in_flight))
1195 | err = -EIO; /* Disk timeout/force-detach during IO... */ |
1196 | |
1197 | now = jiffies; |
1198 | if (flags & BM_AIO_READ) { |
1199 | b->bm_set = bm_count_bits(b); |
1200 | drbd_info(device, "recounting of set bits took additional %lu jiffies\n" , |
1201 | jiffies - now); |
1202 | } |
1203 | now = b->bm_set; |
1204 | |
1205 | if ((flags & ~BM_AIO_READ) == 0) |
1206 | drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n" , |
1207 | ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); |
1208 | |
	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1210 | return err; |
1211 | } |
1212 | |
1213 | /** |
1214 | * drbd_bm_read() - Read the whole bitmap from its on disk location. |
1215 | * @device: DRBD device. |
1216 | */ |
1217 | int drbd_bm_read(struct drbd_device *device, |
1218 | struct drbd_peer_device *peer_device) __must_hold(local) |
1219 | |
1220 | { |
	return bm_rw(device, BM_AIO_READ, 0);
1222 | } |
1223 | |
1224 | /** |
1225 | * drbd_bm_write() - Write the whole bitmap to its on disk location. |
1226 | * @device: DRBD device. |
1227 | * |
1228 | * Will only write pages that have changed since last IO. |
1229 | */ |
1230 | int drbd_bm_write(struct drbd_device *device, |
1231 | struct drbd_peer_device *peer_device) __must_hold(local) |
1232 | { |
	return bm_rw(device, 0, 0);
1234 | } |
1235 | |
1236 | /** |
1237 | * drbd_bm_write_all() - Write the whole bitmap to its on disk location. |
1238 | * @device: DRBD device. |
1239 | * |
1240 | * Will write all pages. |
1241 | */ |
1242 | int drbd_bm_write_all(struct drbd_device *device, |
1243 | struct drbd_peer_device *peer_device) __must_hold(local) |
1244 | { |
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
1246 | } |
1247 | |
1248 | /** |
1249 | * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed. |
1250 | * @device: DRBD device. |
1251 | * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages |
1252 | */ |
1253 | int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local) |
1254 | { |
	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
1256 | } |
1257 | |
1258 | /** |
1259 | * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location. |
1260 | * @device: DRBD device. |
1261 | * |
1262 | * Will only write pages that have changed since last IO. |
1263 | * In contrast to drbd_bm_write(), this will copy the bitmap pages |
1264 | * to temporary writeout pages. It is intended to trigger a full write-out |
1265 | * while still allowing the bitmap to change, for example if a resync or online |
1266 | * verify is aborted due to a failed peer disk, while local IO continues, or |
1267 | * pending resync acks are still being processed. |
1268 | */ |
1269 | int drbd_bm_write_copy_pages(struct drbd_device *device, |
1270 | struct drbd_peer_device *peer_device) __must_hold(local) |
1271 | { |
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
1273 | } |
1274 | |
1275 | /** |
1276 | * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed. |
1277 | * @device: DRBD device. |
1278 | */ |
1279 | int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local) |
1280 | { |
	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
1282 | } |
1283 | |
1284 | /* NOTE |
1285 | * find_first_bit returns int, we return unsigned long. |
1286 | * For this to work on 32bit arch with bitnumbers > (1<<32), |
1287 | * we'd need to return u64, and get a whole lot of other places |
1288 | * fixed where we still use unsigned long. |
1289 | * |
1290 | * this returns a bit number, NOT a sector! |
1291 | */ |
1292 | static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo, |
1293 | const int find_zero_bit) |
1294 | { |
1295 | struct drbd_bitmap *b = device->bitmap; |
1296 | unsigned long *p_addr; |
1297 | unsigned long bit_offset; |
1298 | unsigned i; |
1299 | |
1300 | |
1301 | if (bm_fo > b->bm_bits) { |
1302 | drbd_err(device, "bm_fo=%lu bm_bits=%lu\n" , bm_fo, b->bm_bits); |
1303 | bm_fo = DRBD_END_OF_BITMAP; |
1304 | } else { |
1305 | while (bm_fo < b->bm_bits) { |
1306 | /* bit offset of the first bit in the page */ |
1307 | bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; |
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
1309 | |
			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
					PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
					PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
1316 | |
1317 | __bm_unmap(p_addr); |
1318 | if (i < PAGE_SIZE*8) { |
1319 | bm_fo = bit_offset + i; |
1320 | if (bm_fo >= b->bm_bits) |
1321 | break; |
1322 | goto found; |
1323 | } |
1324 | bm_fo = bit_offset + PAGE_SIZE*8; |
1325 | } |
1326 | bm_fo = DRBD_END_OF_BITMAP; |
1327 | } |
1328 | found: |
1329 | return bm_fo; |
1330 | } |
1331 | |
1332 | static unsigned long bm_find_next(struct drbd_device *device, |
1333 | unsigned long bm_fo, const int find_zero_bit) |
1334 | { |
1335 | struct drbd_bitmap *b = device->bitmap; |
1336 | unsigned long i = DRBD_END_OF_BITMAP; |
1337 | |
1338 | if (!expect(device, b)) |
1339 | return i; |
1340 | if (!expect(device, b->bm_pages)) |
1341 | return i; |
1342 | |
	spin_lock_irq(&b->bm_lock);
1344 | if (BM_DONT_TEST & b->bm_flags) |
1345 | bm_print_lock_info(device); |
1346 | |
1347 | i = __bm_find_next(device, bm_fo, find_zero_bit); |
1348 | |
	spin_unlock_irq(&b->bm_lock);
1350 | return i; |
1351 | } |
1352 | |
1353 | unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo) |
1354 | { |
	return bm_find_next(device, bm_fo, 0);
1356 | } |
1357 | |
1358 | #if 0 |
1359 | /* not yet needed for anything. */ |
1360 | unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) |
1361 | { |
1362 | return bm_find_next(device, bm_fo, 1); |
1363 | } |
1364 | #endif |
1365 | |
1366 | /* does not spin_lock_irqsave. |
1367 | * you must take drbd_bm_lock() first */ |
1368 | unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo) |
1369 | { |
1370 | /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ |
	return __bm_find_next(device, bm_fo, 0);
1372 | } |
1373 | |
1374 | unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) |
1375 | { |
1376 | /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ |
	return __bm_find_next(device, bm_fo, 1);
1378 | } |
1379 | |
1380 | /* returns number of bits actually changed. |
1381 | * for val != 0, we change 0 -> 1, return code positive |
1382 | * for val == 0, we change 1 -> 0, return code negative |
1383 | * wants bitnr, not sector. |
1384 | * expected to be called for only a few bits (e - s about BITS_PER_LONG). |
1385 | * Must hold bitmap lock already. */ |
1386 | static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s, |
1387 | unsigned long e, int val) |
1388 | { |
1389 | struct drbd_bitmap *b = device->bitmap; |
1390 | unsigned long *p_addr = NULL; |
1391 | unsigned long bitnr; |
1392 | unsigned int last_page_nr = -1U; |
1393 | int c = 0; |
1394 | int changed_total = 0; |
1395 | |
1396 | if (e >= b->bm_bits) { |
1397 | drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n" , |
1398 | s, e, b->bm_bits); |
1399 | e = b->bm_bits ? b->bm_bits -1 : 0; |
1400 | } |
1401 | for (bitnr = s; bitnr <= e; bitnr++) { |
1402 | unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); |
1403 | if (page_nr != last_page_nr) { |
1404 | if (p_addr) |
1405 | __bm_unmap(p_addr); |
1406 | if (c < 0) |
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
1410 | changed_total += c; |
1411 | c = 0; |
			p_addr = __bm_map_pidx(b, page_nr);
1413 | last_page_nr = page_nr; |
1414 | } |
1415 | if (val) |
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
1419 | } |
1420 | if (p_addr) |
1421 | __bm_unmap(p_addr); |
1422 | if (c < 0) |
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
1426 | changed_total += c; |
1427 | b->bm_set += changed_total; |
1428 | return changed_total; |
1429 | } |
1430 | |
1431 | /* returns number of bits actually changed. |
1432 | * for val != 0, we change 0 -> 1, return code positive |
1433 | * for val == 0, we change 1 -> 0, return code negative |
1434 | * wants bitnr, not sector */ |
1435 | static int bm_change_bits_to(struct drbd_device *device, const unsigned long s, |
1436 | const unsigned long e, int val) |
1437 | { |
1438 | unsigned long flags; |
1439 | struct drbd_bitmap *b = device->bitmap; |
1440 | int c = 0; |
1441 | |
1442 | if (!expect(device, b)) |
1443 | return 1; |
1444 | if (!expect(device, b->bm_pages)) |
1445 | return 0; |
1446 | |
1447 | spin_lock_irqsave(&b->bm_lock, flags); |
1448 | if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) |
1449 | bm_print_lock_info(device); |
1450 | |
1451 | c = __bm_change_bits_to(device, s, e, val); |
1452 | |
	spin_unlock_irqrestore(&b->bm_lock, flags);
1454 | return c; |
1455 | } |
1456 | |
1457 | /* returns number of bits changed 0 -> 1 */ |
1458 | int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) |
1459 | { |
	return bm_change_bits_to(device, s, e, 1);
1461 | } |
1462 | |
1463 | /* returns number of bits changed 1 -> 0 */ |
1464 | int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) |
1465 | { |
	return -bm_change_bits_to(device, s, e, 0);
1467 | } |
1468 | |
1469 | /* sets all bits in full words, |
1470 | * from first_word up to, but not including, last_word */ |
1471 | static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, |
1472 | int page_nr, int first_word, int last_word) |
1473 | { |
1474 | int i; |
1475 | int bits; |
1476 | int changed = 0; |
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
1478 | |
1479 | /* I think it is more cache line friendly to hweight_long then set to ~0UL, |
1480 | * than to first bitmap_weight() all words, then bitmap_fill() all words */ |
1481 | for (i = first_word; i < last_word; i++) { |
		bits = hweight_long(paddr[i]);
1483 | paddr[i] = ~0UL; |
1484 | changed += BITS_PER_LONG - bits; |
1485 | } |
1486 | kunmap_atomic(paddr); |
1487 | if (changed) { |
1488 | /* We only need lazy writeout, the information is still in the |
1489 | * remote bitmap as well, and is reconstructed during the next |
1490 | * bitmap exchange, if lost locally due to a crash. */ |
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
1492 | b->bm_set += changed; |
1493 | } |
1494 | } |
1495 | |
1496 | /* Same thing as drbd_bm_set_bits, |
1497 | * but more efficient for a large bit range. |
1498 | * You must first drbd_bm_lock(). |
1499 | * Can be called to set the whole bitmap in one go. |
1500 | * Sets bits from s to e _inclusive_. */ |
1501 | void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) |
1502 | { |
1503 | /* First set_bit from the first bit (s) |
1504 | * up to the next long boundary (sl), |
1505 | * then assign full words up to the last long boundary (el), |
1506 | * then set_bit up to and including the last bit (e). |
1507 | * |
1508 | * Do not use memset, because we must account for changes, |
1509 | * so we need to loop over the words with hweight() anyways. |
1510 | */ |
1511 | struct drbd_bitmap *b = device->bitmap; |
1512 | unsigned long sl = ALIGN(s,BITS_PER_LONG); |
1513 | unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1); |
1514 | int first_page; |
1515 | int last_page; |
1516 | int page_nr; |
1517 | int first_word; |
1518 | int last_word; |
1519 | |
1520 | if (e - s <= 3*BITS_PER_LONG) { |
1521 | /* don't bother; el and sl may even be wrong. */ |
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
1525 | return; |
1526 | } |
1527 | |
1528 | /* difference is large enough that we can trust sl and el */ |
1529 | |
	spin_lock_irq(&b->bm_lock);
1531 | |
1532 | /* bits filling the current long */ |
1533 | if (sl) |
		__bm_change_bits_to(device, s, sl-1, 1);
1535 | |
1536 | first_page = sl >> (3 + PAGE_SHIFT); |
1537 | last_page = el >> (3 + PAGE_SHIFT); |
1538 | |
1539 | /* MLPP: modulo longs per page */ |
1540 | /* LWPP: long words per page */ |
1541 | first_word = MLPP(sl >> LN2_BPL); |
1542 | last_word = LWPP; |
1543 | |
1544 | /* first and full pages, unless first page == last page */ |
1545 | for (page_nr = first_page; page_nr < last_page; page_nr++) { |
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
1548 | cond_resched(); |
1549 | first_word = 0; |
		spin_lock_irq(&b->bm_lock);
1551 | } |
1552 | /* last page (respectively only page, for first page == last page) */ |
1553 | last_word = MLPP(el >> LN2_BPL); |
1554 | |
1555 | /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples). |
1556 | * ==> e = 32767, el = 32768, last_page = 2, |
1557 | * and now last_word = 0. |
1558 | * We do not want to touch last_page in this case, |
1559 | * as we did not allocate it, it is not present in bitmap->bm_pages. |
1560 | */ |
1561 | if (last_word) |
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);
1563 | |
1564 | /* possibly trailing bits. |
1565 | * example: (e & 63) == 63, el will be e+1. |
1566 | * if that even was the very last bit, |
1567 | * it would trigger an assert in __bm_change_bits_to() |
1568 | */ |
1569 | if (el <= e) |
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
1572 | } |
1573 | |
1574 | /* returns bit state |
1575 | * wants bitnr, NOT sector. |
1576 | * inherently racy... area needs to be locked by means of {al,rs}_lru |
1577 | * 1 ... bit set |
1578 | * 0 ... bit not set |
1579 | * -1 ... first out of bounds access, stop testing for bits! |
1580 | */ |
1581 | int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr) |
1582 | { |
1583 | unsigned long flags; |
1584 | struct drbd_bitmap *b = device->bitmap; |
1585 | unsigned long *p_addr; |
1586 | int i; |
1587 | |
1588 | if (!expect(device, b)) |
1589 | return 0; |
1590 | if (!expect(device, b->bm_pages)) |
1591 | return 0; |
1592 | |
1593 | spin_lock_irqsave(&b->bm_lock, flags); |
1594 | if (BM_DONT_TEST & b->bm_flags) |
1595 | bm_print_lock_info(device); |
1596 | if (bitnr < b->bm_bits) { |
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1599 | bm_unmap(p_addr); |
1600 | } else if (bitnr == b->bm_bits) { |
1601 | i = -1; |
1602 | } else { /* (bitnr > b->bm_bits) */ |
1603 | drbd_err(device, "bitnr=%lu > bm_bits=%lu\n" , bitnr, b->bm_bits); |
1604 | i = 0; |
1605 | } |
1606 | |
	spin_unlock_irqrestore(&b->bm_lock, flags);
1608 | return i; |
1609 | } |
1610 | |
1611 | /* returns number of bits set in the range [s, e] */ |
1612 | int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) |
1613 | { |
1614 | unsigned long flags; |
1615 | struct drbd_bitmap *b = device->bitmap; |
1616 | unsigned long *p_addr = NULL; |
1617 | unsigned long bitnr; |
1618 | unsigned int page_nr = -1U; |
1619 | int c = 0; |
1620 | |
1621 | /* If this is called without a bitmap, that is a bug. But just to be |
1622 | * robust in case we screwed up elsewhere, in that case pretend there |
1623 | * was one dirty bit in the requested area, so we won't try to do a |
1624 | * local read there (no bitmap probably implies no disk) */ |
1625 | if (!expect(device, b)) |
1626 | return 1; |
1627 | if (!expect(device, b->bm_pages)) |
1628 | return 1; |
1629 | |
1630 | spin_lock_irqsave(&b->bm_lock, flags); |
1631 | if (BM_DONT_TEST & b->bm_flags) |
1632 | bm_print_lock_info(device); |
1633 | for (bitnr = s; bitnr <= e; bitnr++) { |
1634 | unsigned int idx = bm_bit_to_page_idx(b, bitnr); |
1635 | if (page_nr != idx) { |
1636 | page_nr = idx; |
1637 | if (p_addr) |
1638 | bm_unmap(p_addr); |
1639 | p_addr = bm_map_pidx(b, idx); |
1640 | } |
1641 | if (expect(device, bitnr < b->bm_bits)) |
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1643 | else |
1644 | drbd_err(device, "bitnr=%lu bm_bits=%lu\n" , bitnr, b->bm_bits); |
1645 | } |
1646 | if (p_addr) |
1647 | bm_unmap(p_addr); |
	spin_unlock_irqrestore(&b->bm_lock, flags);
1649 | return c; |
1650 | } |
1651 | |
1652 | |
1653 | /* inherently racy... |
1654 | * return value may be already out-of-date when this function returns. |
 * but the general usage is that this is only used during a cstate when bits are
 * only cleared, not set, and we typically only care for the case when the return
1657 | * value is zero, or we already "locked" this "bitmap extent" by other means. |
1658 | * |
1659 | * enr is bm-extent number, since we chose to name one sector (512 bytes) |
1660 | * worth of the bitmap a "bitmap extent". |
1661 | * |
1662 | * TODO |
1663 | * I think since we use it like a reference count, we should use the real |
1664 | * reference count of some bitmap extent element from some lru instead... |
1665 | * |
1666 | */ |
1667 | int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr) |
1668 | { |
1669 | struct drbd_bitmap *b = device->bitmap; |
1670 | int count, s, e; |
1671 | unsigned long flags; |
1672 | unsigned long *p_addr, *bm; |
1673 | |
1674 | if (!expect(device, b)) |
1675 | return 0; |
1676 | if (!expect(device, b->bm_pages)) |
1677 | return 0; |
1678 | |
1679 | spin_lock_irqsave(&b->bm_lock, flags); |
1680 | if (BM_DONT_TEST & b->bm_flags) |
1681 | bm_print_lock_info(device); |
1682 | |
1683 | s = S2W(enr); |
1684 | e = min((size_t)S2W(enr+1), b->bm_words); |
1685 | count = 0; |
1686 | if (s < b->bm_words) { |
1687 | int n = e-s; |
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		count += bitmap_weight(bm, n * BITS_PER_LONG);
1691 | bm_unmap(p_addr); |
1692 | } else { |
1693 | drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n" , s); |
1694 | } |
	spin_unlock_irqrestore(&b->bm_lock, flags);
1696 | return count; |
1697 | } |
1698 | |