// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle. All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../btrfs_inode.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../block-group.h"

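/*
 * Drain all extent maps from a test tree.
 *
 * Every mapping is removed and the reference held by the tree is dropped.
 * With CONFIG_BTRFS_DEBUG, any extent map that is still referenced elsewhere
 * (i.e. leaked by a test case) is reported before being freed.
 */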
static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	write_lock(&em_tree->lock);
	while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
		node = rb_first_cached(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_err(
"em leak: em (start %llu len %llu block_start %llu block_len %llu) refs %d",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
	write_unlock(&em_tree->lock);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.  There is a
 * file extent [0, 16K), followed by another file extent [16K, 20K).  Two dio
 * reads enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K) and
 * t2 is reading [0, 8K).
 *
 *     t1                                    t2
 *  btrfs_get_extent()                    btrfs_get_extent()
 *   -> lookup_extent_mapping()            -> lookup_extent_mapping()
 *   -> add_extent_mapping(0, 16K)
 *   -> return em
 *                                         -> add_extent_mapping(0, 16K)
 *                                         -> #handle -EEXIST
 */
static int test_case_1(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 16K)");
		goto out;
	}
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [16K, 20K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case1 [%llu %llu]: no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	if (em->start != 0 || extent_map_end(em) != SZ_16K ||
	    em->block_start != 0 || em->block_len != SZ_16K) {
		test_err(
"case1 [%llu %llu]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with -EEXIST, i.e. read an inline extent,
 * discard the page cache and then read it again.
 */
static int test_case_2(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 1K)");
		goto out;
	}
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case2 [0 1K]: ret %d", ret);
		goto out;
	}
	if (!em) {
		test_err("case2 [0 1K]: no extent map returned");
		ret = -ENOENT;
		goto out;
	}
	if (em->start != 0 || extent_map_end(em) != SZ_1K ||
	    em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1) {
		test_err(
"case2 [0 1K]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

static int __test_case_3(struct btrfs_fs_info *fs_info,
			 struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case3 [%llu %llu): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case3 [%llu %llu): no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (start < em->start || start + len > extent_map_end(em) ||
	    em->start != em->block_start || em->len != em->block_len) {
		test_err(
"case3 [%llu %llu): ret %d em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K) and two jobs are running concurrently
 * against it: t1 is doing a buffered write to [4K, 8K) and t2 is doing a
 * dio read from [0, 4K), [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into the tree.
 *
 *     t1                       t2
 *  cow_file_range()         btrfs_get_extent()
 *                            -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                            -> add_extent_mapping()
 */
static int test_case_3(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_3(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, SZ_8K);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));

	return ret;
}

static int __test_case_4(struct btrfs_fs_info *fs_info,
			 struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * SZ_1K;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * SZ_1K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [8K, 32K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [0K, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case4 [%llu %llu): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case4 [%llu %llu): no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	if (start < em->start || start + len > extent_map_end(em)) {
		test_err(
"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K) and two jobs are running concurrently
 * against it: t1 is doing a dio write to [8K, 32K) and t2 is doing a dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) into em [0K, 8K) and [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()           btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()               -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()          -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                        -> add_extent_mapping()
 *                                           # handle -EEXIST when adding
 *                                           # [0, 32K)
 */
static int test_case_4(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_4(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_4(fs_info, em_tree, SZ_4K);

	return ret;
}

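/*
 * Helper for the following test cases: add an extent map for the range
 * [start, start + len) marked as zlib compressed.  Compressed extent maps
 * are never merged, so the tree keeps exactly the layout the test built.
 */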
static int add_compressed_extent(struct btrfs_fs_info *fs_info,
				 struct extent_map_tree *em_tree,
				 u64 start, u64 len, u64 block_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	em->start = start;
	em->len = len;
	em->block_start = block_start;
	em->block_len = SZ_4K;
	em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	free_extent_map(em);
	if (ret < 0) {
		test_err("cannot add extent map [%llu, %llu)", start, start + len);
		return ret;
	}

	return 0;
}

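/* A simple [start, start + len) byte range used to describe the expected tree contents. */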
struct extent_range {
	u64 start;
	u64 len;
};

/* The valid states of the tree after every drop, as described below. */
static struct extent_range valid_ranges[][7] = {
	{
		{ .start = 0,			.len = SZ_8K },		/* [0, 8K) */
		{ .start = SZ_4K * 3,		.len = SZ_4K * 3},	/* [12k, 24k) */
		{ .start = SZ_4K * 6,		.len = SZ_4K * 3},	/* [24k, 36k) */
		{ .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
		{ .start = SZ_4K * 10,		.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
		{ .start = 0,			.len = SZ_8K },		/* [0, 8K) */
		{ .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
		{ .start = SZ_4K * 6,		.len = SZ_4K * 3},	/* [24k, 36k) */
		{ .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
		{ .start = SZ_4K * 10,		.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
		{ .start = 0,			.len = SZ_8K },		/* [0, 8K) */
		{ .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
		{ .start = SZ_4K * 6,		.len = SZ_4K},		/* [24k, 28k) */
		{ .start = SZ_32K,		.len = SZ_4K},		/* [32k, 36k) */
		{ .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
		{ .start = SZ_4K * 10,		.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
		{ .start = 0,			.len = SZ_8K},		/* [0, 8K) */
		{ .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
		{ .start = SZ_4K * 6,		.len = SZ_4K},		/* [24k, 28k) */
	}
};

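/*
 * Walk the extent map tree in order and compare every mapping against
 * valid_ranges[index].  Fail if a start or length differs, if the tree runs
 * out of mappings before the expected ranges do, or if extra mappings remain
 * after the last expected range.
 */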
static int validate_range(struct extent_map_tree *em_tree, int index)
{
	struct rb_node *n;
	int i;

	for (i = 0, n = rb_first_cached(&em_tree->map);
	     valid_ranges[index][i].len && n;
	     i++, n = rb_next(n)) {
		struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);

		if (entry->start != valid_ranges[index][i].start) {
			test_err("mapping has start %llu expected %llu",
				 entry->start, valid_ranges[index][i].start);
			return -EINVAL;
		}

		if (entry->len != valid_ranges[index][i].len) {
			test_err("mapping has len %llu expected %llu",
				 entry->len, valid_ranges[index][i].len);
			return -EINVAL;
		}
	}

	/*
	 * We exited because we don't have any more entries in the extent_map
	 * but we still expect more valid entries.
	 */
	if (valid_ranges[index][i].len) {
		test_err("missing an entry");
		return -EINVAL;
	}

	/* We exited the loop but still have entries in the extent map. */
	if (n) {
		test_err("we have a left over entry in the extent map we didn't expect");
		return -EINVAL;
	}

	return 0;
}

/*
 * Test scenario:
 *
 * Test the various edge cases of btrfs_drop_extent_map_range().  Create the
 * following ranges:
 *
 * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k, 64k)
 *
 * And then drop:
 *
 * [8k, 12k) - test the single front split
 * [12k, 20k) - test the single back split
 * [28k, 32k) - test the double split
 * [32k, 64k) - test whole em dropping
 *
 * They'll have the EXTENT_FLAG_COMPRESS_ZLIB flag set to keep the em tree
 * from merging the ems.
 */
static int test_case_5(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct inode *inode;
	u64 start, end;
	int ret;

	test_msg("Running btrfs_drop_extent_map_range tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* [0, 12k) */
	ret = add_compressed_extent(fs_info, em_tree, 0, SZ_4K * 3, 0);
	if (ret) {
		test_err("cannot add extent range [0, 12K)");
		goto out;
	}

	/* [12k, 24k) */
	ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K);
	if (ret) {
		test_err("cannot add extent range [12k, 24k)");
		goto out;
	}

	/* [24k, 36k) */
	ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K);
	if (ret) {
		test_err("cannot add extent range [24k, 36k)");
		goto out;
	}

	/* [36k, 40k) */
	ret = add_compressed_extent(fs_info, em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
	if (ret) {
		test_err("cannot add extent range [36k, 40k)");
		goto out;
	}

	/* [40k, 64k) */
	ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K);
	if (ret) {
		test_err("cannot add extent range [40k, 64k)");
		goto out;
	}

	/* Drop [8k, 12k) */
	start = SZ_8K;
	end = (3 * SZ_4K) - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 0);
	if (ret)
		goto out;

	/* Drop [12k, 20k) */
	start = SZ_4K * 3;
	end = SZ_16K + SZ_4K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 1);
	if (ret)
		goto out;

	/* Drop [28k, 32k) */
	start = SZ_32K - SZ_4K;
	end = SZ_32K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 2);
	if (ret)
		goto out;

	/* Drop [32k, 64k) */
	start = SZ_32K;
	end = SZ_64K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 3);
	if (ret)
		goto out;
out:
	iput(inode);
	return ret;
}

/*
 * Test the btrfs_add_extent_mapping helper which will attempt to create an em
 * for areas between two existing ems.  Validate it doesn't do this when there
 * are two unmerged ems side by side.
 */
static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree)
{
	struct extent_map *em = NULL;
	int ret;

	ret = add_compressed_extent(fs_info, em_tree, 0, SZ_4K, 0);
	if (ret)
		goto out;

	ret = add_compressed_extent(fs_info, em_tree, SZ_4K, SZ_4K, 0);
	if (ret)
		goto out;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_16K;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K);
	write_unlock(&em_tree->lock);

	if (ret != 0) {
		test_err("got an error when adding our em: %d", ret);
		goto out;
	}

	ret = -EINVAL;
	if (em->start != 0) {
		test_err("unexpected em->start at %llu, wanted 0", em->start);
		goto out;
	}
	if (em->len != SZ_4K) {
		test_err("unexpected em->len %llu, expected 4K", em->len);
		goto out;
	}
	ret = 0;
out:
	free_extent_map(em);
	free_extent_map_tree(em_tree);
	return ret;
}

/*
 * Regression test for btrfs_drop_extent_map_range.  Calling with skip_pinned ==
 * true would mess up the start/end calculations and subsequent splits would be
 * incorrect.
 */
static int test_case_7(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct inode *inode;
	int ret;

	test_msg("Running btrfs_drop_extent_map_range with pinned");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}

	em_tree = &BTRFS_I(inode)->extent_tree;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* [0, 16K), pinned */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_4K;
	em->flags |= EXTENT_FLAG_PINNED;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("couldn't add extent map");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* [32K, 48K), not pinned */
	em->start = SZ_32K;
	em->len = SZ_16K;
	em->block_start = SZ_32K;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("couldn't add extent map");
		goto out;
	}
	free_extent_map(em);

	/*
	 * Drop [0, 36K).  This should skip the pinned [0, 16K) extent and then
	 * split the [32K, 48K) extent.
	 */
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true);

	/* Make sure our extent maps look sane. */
	ret = -EINVAL;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, 0, SZ_16K);
	read_unlock(&em_tree->lock);
	if (!em) {
		test_err("didn't find an em at 0 as expected");
		goto out;
	}

	if (em->start != 0) {
		test_err("em->start is %llu, expected 0", em->start);
		goto out;
	}

	if (em->len != SZ_16K) {
		test_err("em->len is %llu, expected 16K", em->len);
		goto out;
	}

	free_extent_map(em);

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
	read_unlock(&em_tree->lock);
	if (em) {
		test_err("found an em when we weren't expecting one");
		goto out;
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
	read_unlock(&em_tree->lock);
	if (!em) {
		test_err("didn't find an em at 32K as expected");
		goto out;
	}

	if (em->start != (36 * SZ_1K)) {
		test_err("em->start is %llu, expected 36K", em->start);
		goto out;
	}

	if (em->len != (12 * SZ_1K)) {
		test_err("em->len is %llu, expected 12K", em->len);
		goto out;
	}

	free_extent_map(em);

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
	read_unlock(&em_tree->lock);
	if (em) {
		test_err("found an unexpected em above 48K");
		goto out;
	}

	ret = 0;
out:
	free_extent_map(em);
	iput(inode);
	return ret;
}

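/*
 * Description of a single btrfs_rmap_block() test: the chunk geometry to
 * build, whether a reverse mapping of the super block mirror's physical
 * address is expected, and the logical addresses it should resolve to.
 */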
struct rmap_test_vector {
	u64 raid_type;
	u64 physical_start;
	u64 data_stripe_size;
	u64 num_data_stripes;
	u64 num_stripes;
	/* Assume we won't have more than 5 physical stripes */
	u64 data_stripe_phys_start[5];
	bool expected_mapped_addr;
	/* Physical to logical addresses */
	u64 mapped_logical[5];
};

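/*
 * Build a dummy chunk map from the test vector, add it to the mapping tree
 * and verify that btrfs_rmap_block() maps the physical offset of super block
 * mirror 1 back to the expected logical addresses and stripe length.
 */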
static int test_rmap_block(struct btrfs_fs_info *fs_info,
			   struct rmap_test_vector *test)
{
	struct btrfs_chunk_map *map;
	u64 *logical = NULL;
	int i, out_ndaddrs, out_stripe_len;
	int ret;

	map = btrfs_alloc_chunk_map(test->num_stripes, GFP_KERNEL);
	if (!map) {
		test_std_err(TEST_ALLOC_CHUNK_MAP);
		return -ENOMEM;
	}

	/* Start at 4GiB logical address */
	map->start = SZ_4G;
	map->chunk_len = test->data_stripe_size * test->num_data_stripes;
	map->stripe_size = test->data_stripe_size;
	map->num_stripes = test->num_stripes;
	map->type = test->raid_type;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info);

		if (IS_ERR(dev)) {
			test_err("cannot allocate device");
			ret = PTR_ERR(dev);
			goto out;
		}
		map->stripes[i].dev = dev;
		map->stripes[i].physical = test->data_stripe_phys_start[i];
	}

	ret = btrfs_add_chunk_map(fs_info, map);
	if (ret) {
		test_err("error adding chunk map to mapping tree");
		goto out_free;
	}

	ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
			       &logical, &out_ndaddrs, &out_stripe_len);
	if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
		test_err("didn't rmap anything but expected %d",
			 test->expected_mapped_addr);
		goto out;
	}

	if (out_stripe_len != BTRFS_STRIPE_LEN) {
		test_err("calculated stripe length doesn't match");
		goto out;
	}

	if (out_ndaddrs != test->expected_mapped_addr) {
		for (i = 0; i < out_ndaddrs; i++)
			test_msg("mapped %llu", logical[i]);
		test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
		goto out;
	}

	for (i = 0; i < out_ndaddrs; i++) {
		if (logical[i] != test->mapped_logical[i]) {
			test_err("unexpected logical address mapped");
			goto out;
		}
	}

	ret = 0;
out:
	btrfs_remove_chunk_map(fs_info, map);
out_free:
	kfree(logical);
	return ret;
}

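/*
 * Entry point for the extent map self tests: run the extent map test cases
 * against a dummy fs_info, then the reverse mapping (rmap) tests.
 */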
int btrfs_test_extent_map(void)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct extent_map_tree *em_tree;
	int ret = 0, i;
	struct rmap_test_vector rmap_tests[] = {
		{
			/*
			 * Test that a chunk with 2 data stripes, one of which
			 * intersects the physical address of the super block,
			 * is correctly recognised.
			 */
			.raid_type = BTRFS_BLOCK_GROUP_RAID1,
			.physical_start = SZ_64M - SZ_4M,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 2,
			.num_stripes = 2,
			.data_stripe_phys_start =
				{SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
			.expected_mapped_addr = true,
			.mapped_logical = {SZ_4G + SZ_4M}
		},
		{
			/*
			 * Test that out-of-range physical addresses are
			 * ignored
			 */

			/* SINGLE chunk type */
			.raid_type = 0,
			.physical_start = SZ_4G,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 1,
			.num_stripes = 1,
			.data_stripe_phys_start = {SZ_256M},
			.expected_mapped_addr = false,
			.mapped_logical = {0}
		}
	};

	test_msg("running extent_map tests");

	/*
	 * Note: the fs_info is not set up completely, we only need
	 * fs_info::fsid for the tracepoint.
	 */
	fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree) {
		ret = -ENOMEM;
		goto out;
	}

	extent_map_tree_init(em_tree);

	ret = test_case_1(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_2(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_3(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_4(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_5(fs_info);
	if (ret)
		goto out;
	ret = test_case_6(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_7(fs_info);
	if (ret)
		goto out;

	test_msg("running rmap tests");
	for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
		ret = test_rmap_block(fs_info, &rmap_tests[i]);
		if (ret)
			goto out;
	}

out:
	kfree(em_tree);
	btrfs_free_dummy_fs_info(fs_info);

	return ret;
}