// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../disk-io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

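/*
 * Walk the pages of @inode in the byte range [@start, @end] and apply the
 * actions selected by @flags: verify that each page is locked
 * (PROCESS_TEST_LOCKED), unlock it (PROCESS_UNLOCK) and/or drop the extra
 * page reference (PROCESS_RELEASE).
 *
 * Returns the number of pages that failed the locked check, so 0 means all
 * pages in the range were locked as expected.
 */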
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
						end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
			       "stuck in a loop, start %llu, end %llu, ret %d\n",
			       start, end, ret);
			break;
		}
	}

	return count;
}

#define STATE_FLAG_STR_LEN		256

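/*
 * Append the name of EXTENT_##name to @dest if that bit is set in
 * @state->state, separating consecutive flag names with '|'.
 */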
#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})

static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}

static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg(" start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}

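/*
 * Exercise find_lock_delalloc_range() against a dummy inode: a delalloc
 * range smaller than the search range, a search starting inside a large
 * delalloc range, a search with no delalloc at all, and finally a range
 * that contains a page which is no longer dirty.
 */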
static int test_find_delalloc(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root = NULL;
	struct inode *inode = NULL;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at its maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root)) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = PTR_ERR(root);
		goto out;
	}

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		ret = -ENOMEM;
		goto out;
	}
	tmp = &BTRFS_I(inode)->io_tree;
	BTRFS_I(inode)->root = root;

	/*
	 * Passing NULL as we don't have fs_info but tracepoints are not used
	 * at this point
	 */
	extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);

	/*
	 * First go through and create and mark all of our pages dirty, we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/* Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu, end %llu",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	btrfs_free_dummy_root(root);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

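/*
 * Compare every bit of @bitmap with the matching bit in @eb, reading the
 * extent buffer both from byte 0 and from the byte that contains the bit,
 * and report the first mismatching byte.
 */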
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i;

	for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i, i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i / BITS_PER_BYTE, i % BITS_PER_BYTE,
				 i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}
	}
	return 0;
}

static int test_bitmap_set(const char *name, unsigned long *bitmap,
			   struct extent_buffer *eb,
			   unsigned long byte_start, unsigned long bit_start,
			   unsigned long bit_len)
{
	int ret;

	bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

static int test_bitmap_clear(const char *name, unsigned long *bitmap,
			     struct extent_buffer *eb,
			     unsigned long byte_start, unsigned long bit_start,
			     unsigned long bit_len)
{
	int ret;

	bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}
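
/*
 * Run set/clear patterns of increasing complexity against both a plain
 * bitmap and the extent buffer: full range, within a single byte, across
 * byte and page boundaries, and finally a pseudo-random pattern, verifying
 * after every step that the two stay identical.
 */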
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	unsigned long byte_len = eb->len;
	u32 x;
	int ret;

	ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
	if (ret < 0)
		return ret;

	/* Straddling pages test */
	if (byte_len > PAGE_SIZE) {
		ret = test_bitmap_set("cross page set", bitmap, eb,
				      PAGE_SIZE - sizeof(long) / 2, 0,
				      sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
				      byte_len * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_clear("cross page clear", bitmap, eb,
					PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

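	/* Advance x with a linear congruential generator, 32 bits per round. */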
	for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

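/*
 * Allocate a dummy extent buffer and run the bitmap tests against it, both
 * at a nodesize-aligned start and at a start that is only sectorsize
 * aligned.
 */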
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for case where the tree block is sectorsize aligned but
	 * not nodesize aligned.
	 */
	eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

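/*
 * Exercise find_first_clear_extent_bit() on an io tree: an empty tree, the
 * hole before the first range, the hole between two ranges, searches that
 * start inside a set range, and a search past the end of the last range.
 */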
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);

	/* Test correct handling of empty tree */
	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}
	/*
	 * Set 1M-4M alloc/discard; together with the 32M-64M range added
	 * below, this leaves a hole between 4M and 32M.
	 */
	set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	/*
	 * Request first hole starting at 12M, we should get 4M-32M
	 */
	find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of allocated range, should get the next one
	 * available, which happens to be unallocated -> 4M-32M
	 */
	find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
	 * being unset in this range, we should get the entry in range 64M-72M
	 */
	set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
	find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
				    CHUNK_TRIMMED);

	/*
	 * Search in the middle of set range whose immediate neighbour doesn't
	 * have the bits set so it must be returned
	 */
	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range, shall return after last known range
	 * and end should be -1
	 */
	find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
	"error handling beyond end of range search: start %llu end %llu",
			 start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}

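/*
 * On a mismatch, walk both buffers byte by byte and print the first byte
 * where the extent buffer and the memory copy differ.
 */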
static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
					const char *test_name)
{
	for (int i = 0; i < eb->len; i++) {
		struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0);
		void *addr = page_address(page) + offset_in_page(i);

		if (memcmp(addr, memory + i, 1) != 0) {
			test_err("%s failed", test_name);
			test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
				 i, *(u8 *)addr, *(u8 *)(memory + i));
			return;
		}
	}
}

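/*
 * Compare the extent buffer with the memory copy page by page; on the first
 * differing page, dump the mismatching byte and return -EUCLEAN.
 */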
static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
				const char *test_name)
{
	for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
		void *eb_addr = folio_address(eb->folios[i]);

		if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
			dump_eb_and_memory_contents(eb, memory, test_name);
			return -EUCLEAN;
		}
	}
	return 0;
}

/*
 * Init both memory and extent buffer contents to the same randomly generated
 * contents.
 */
static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
{
	get_random_bytes(memory, eb->len);
	write_extent_buffer(eb, memory, 0, eb->len);
}

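/*
 * Mirror every memcpy()/memmove() done on a plain memory buffer with
 * memcpy_extent_buffer()/memmove_extent_buffer() on the extent buffer,
 * covering overlapping and non-overlapping copies within one page and,
 * for nodesize > PAGE_SIZE, across a page boundary.
 */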
static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct extent_buffer *eb = NULL;
	void *memory = NULL;
	int ret;

	test_msg("running extent buffer memory operation tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	memory = kvzalloc(nodesize, GFP_KERNEL);
	if (!memory) {
		test_err("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	init_eb_and_memory(eb, memory);
	ret = verify_eb_and_memory(eb, memory, "full eb write");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 16, 16);
	memcpy_extent_buffer(eb, 0, 16, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 16);
	memcpy_extent_buffer(eb, 0, 2048, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
	if (ret < 0)
		goto out;
	memcpy(memory, memory + 2048, 2048);
	memcpy_extent_buffer(eb, 0, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 256, 512);
	memmove_extent_buffer(eb, 512, 256, 512);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memmove(memory + 2048, memory + 512, 2048);
	memmove_extent_buffer(eb, 2048, 512, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
	if (ret < 0)
		goto out;
	memmove(memory + 512, memory + 2048, 2048);
	memmove_extent_buffer(eb, 512, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
	if (ret < 0)
		goto out;

	if (nodesize > PAGE_SIZE) {
		memcpy(memory, memory + 4096 - 128, 256);
		memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
		memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
		memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
		memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
		if (ret < 0)
			goto out;
	}
out:
	free_extent_buffer(eb);
	kvfree(memory);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_eb_mem_ops(sectorsize, nodesize);
out:
	return ret;
}