// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the valid node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

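/*
 * Editorial note: the checks below compare each cache's footprint against
 * a budget of roughly avail_ram * ram_thresh / 100 pages, scaled by the
 * per-component share listed in the comment inside the function. For
 * illustration only (assuming 1 GiB of low memory and ram_thresh = 10):
 * FREE_NIDS may cache about 1 GiB * 10% / 4 = ~25 MiB of free_nid objects
 * before this function starts returning false.
 */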
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
	nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. updating NEW_ADDR to a valid block address;
	 * 2. updating an old block address to a new one.
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

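/*
 * Editorial note: fsync_node_list tracks dnodes written out by fsync; each
 * entry is stamped with an increasing seq_id so the fsync path can later
 * wait for writeback of everything up to the id it observed (via
 * f2fs_wait_on_node_pages_writeback(), outside this excerpt).
 */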
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

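/*
 * set_node_addr() records a node block move in the NAT cache: NULL_ADDR ->
 * NEW_ADDR on allocation, old address -> new address on writeback, and any
 * address -> NULL_ADDR on truncation (which also bumps the node version).
 * A rough sketch of the writeback usage (cf. __write_node_page() below):
 *
 *	f2fs_get_node_info(sbi, nid, &ni, false);
 *	fio.old_blkaddr = ni.blk_addr;
 *	f2fs_do_write_node_page(nid, &fio);	// picks fio.new_blkaddr
 *	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
 */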
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

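/*
 * f2fs_get_node_info() resolves a nid in three places, cheapest first:
 * 1) the in-memory NAT cache, 2) the NAT journal kept in the hot-data
 * curseg summary, 3) the on-disk NAT block. Results from 2) or 3) are
 * inserted back into the cache, unless a checkpoint is in progress.
 */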
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first. This rwsem is on the critical path of checkpoint, which also
	 * takes the above nat_tree_lock. Therefore, retry if we fail to grab
	 * it here, without bothering the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from the nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * Readahead MAX_RA_NODE node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, (int)NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

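/*
 * Worked example for get_node_path() below, using the common layout where
 * an inode holds 923 data pointers and ADDRS_PER_BLOCK == NIDS_PER_BLOCK
 * == 1018 (illustrative numbers; the actual values depend on the inode):
 * blocks 0..922 live in the inode itself (level 0); block 923 is offset 0
 * of the first direct node (level 1, offset[0] = NODE_DIR1_BLOCK); block
 * 923 + 2 * 1018 = 2959 is the first block reached through the first
 * indirect node (level 2).
 */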
/*
 * The maximum depth is four.
 * offset[0] will have the raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make a dnode_of_data as the parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

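/*
 * Truncation walks the node tree top-down: partial subtrees on the
 * truncation boundary are handled by truncate_partial_nodes(), then whole
 * direct/indirect/double-indirect subtrees past the boundary are freed via
 * truncate_dnode()/truncate_nodes() as the loop below advances offset[0]
 * across NODE_DIR1_BLOCK..NODE_DIND_BLOCK.
 */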
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

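/*
 * f2fs_new_node_page() sets up an in-memory page for a brand-new node:
 * ofs == 0 means an inode page, f2fs_has_xattr_block(ofs) an xattr node,
 * anything else an (in)direct node at that offset in the tree. Only
 * NEW_ADDR is reserved in the NAT here; a real block address is assigned
 * at writeback time.
 */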
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* -ENOENT from read_node_page is not treated as an error */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page->index, NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}

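/*
 * Write one dirty node page. Roughly (an editorial summary of the code
 * below): bail out or redirty on checkpoint error/recovery, look up the
 * old block address, take node_write to exclude checkpoint, skip pages
 * already truncated (NULL_ADDR), stamp fsync-written dnodes with a seq_id,
 * then issue the write and record the new address in the NAT cache.
 */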
1596 | static int __write_node_page(struct page *page, bool atomic, bool *submitted, |
1597 | struct writeback_control *wbc, bool do_balance, |
1598 | enum iostat_type io_type, unsigned int *seq_id) |
1599 | { |
1600 | struct f2fs_sb_info *sbi = F2FS_P_SB(page); |
1601 | nid_t nid; |
1602 | struct node_info ni; |
1603 | struct f2fs_io_info fio = { |
1604 | .sbi = sbi, |
1605 | .ino = ino_of_node(node_page: page), |
1606 | .type = NODE, |
1607 | .op = REQ_OP_WRITE, |
1608 | .op_flags = wbc_to_write_flags(wbc), |
1609 | .page = page, |
1610 | .encrypted_page = NULL, |
1611 | .submitted = 0, |
1612 | .io_type = io_type, |
1613 | .io_wbc = wbc, |
1614 | }; |
1615 | unsigned int seq; |
1616 | |
1617 | trace_f2fs_writepage(page, type: NODE); |
1618 | |
1619 | if (unlikely(f2fs_cp_error(sbi))) { |
1620 | /* keep node pages in remount-ro mode */ |
1621 | if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) |
1622 | goto redirty_out; |
1623 | ClearPageUptodate(page); |
1624 | dec_page_count(sbi, count_type: F2FS_DIRTY_NODES); |
1625 | unlock_page(page); |
1626 | return 0; |
1627 | } |
1628 | |
1629 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
1630 | goto redirty_out; |
1631 | |
1632 | if (!is_sbi_flag_set(sbi, type: SBI_CP_DISABLED) && |
1633 | wbc->sync_mode == WB_SYNC_NONE && |
1634 | IS_DNODE(node_page: page) && is_cold_node(page)) |
1635 | goto redirty_out; |
1636 | |
1637 | /* get old block addr of this node page */ |
1638 | nid = nid_of_node(node_page: page); |
1639 | f2fs_bug_on(sbi, page->index != nid); |
1640 | |
1641 | if (f2fs_get_node_info(sbi, nid, ni: &ni, checkpoint_context: !do_balance)) |
1642 | goto redirty_out; |
1643 | |
1644 | if (wbc->for_reclaim) { |
1645 | if (!f2fs_down_read_trylock(sem: &sbi->node_write)) |
1646 | goto redirty_out; |
1647 | } else { |
1648 | f2fs_down_read(sem: &sbi->node_write); |
1649 | } |
1650 | |
1651 | /* This page is already truncated */ |
1652 | if (unlikely(ni.blk_addr == NULL_ADDR)) { |
1653 | ClearPageUptodate(page); |
1654 | dec_page_count(sbi, count_type: F2FS_DIRTY_NODES); |
1655 | f2fs_up_read(sem: &sbi->node_write); |
1656 | unlock_page(page); |
1657 | return 0; |
1658 | } |
1659 | |
1660 | if (__is_valid_data_blkaddr(blkaddr: ni.blk_addr) && |
1661 | !f2fs_is_valid_blkaddr(sbi, blkaddr: ni.blk_addr, |
1662 | type: DATA_GENERIC_ENHANCE)) { |
1663 | f2fs_up_read(sem: &sbi->node_write); |
1664 | goto redirty_out; |
1665 | } |
1666 | |
1667 | if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi)) |
1668 | fio.op_flags |= REQ_PREFLUSH | REQ_FUA; |
1669 | |
1670 | /* should add to global list before clearing PAGECACHE status */ |
1671 | if (f2fs_in_warm_node_list(sbi, page)) { |
1672 | seq = f2fs_add_fsync_node_entry(sbi, page); |
1673 | if (seq_id) |
1674 | *seq_id = seq; |
1675 | } |
1676 | |
1677 | set_page_writeback(page); |
1678 | |
1679 | fio.old_blkaddr = ni.blk_addr; |
1680 | f2fs_do_write_node_page(nid, fio: &fio); |
1681 | set_node_addr(sbi, ni: &ni, new_blkaddr: fio.new_blkaddr, is_fsync_dnode(page)); |
1682 | dec_page_count(sbi, count_type: F2FS_DIRTY_NODES); |
1683 | f2fs_up_read(sem: &sbi->node_write); |
1684 | |
1685 | if (wbc->for_reclaim) { |
1686 | f2fs_submit_merged_write_cond(sbi, NULL, page, ino: 0, type: NODE); |
1687 | submitted = NULL; |
1688 | } |
1689 | |
1690 | unlock_page(page); |
1691 | |
1692 | if (unlikely(f2fs_cp_error(sbi))) { |
1693 | f2fs_submit_merged_write(sbi, type: NODE); |
1694 | submitted = NULL; |
1695 | } |
1696 | if (submitted) |
1697 | *submitted = fio.submitted; |
1698 | |
1699 | if (do_balance) |
1700 | f2fs_balance_fs(sbi, need: false); |
1701 | return 0; |
1702 | |
1703 | redirty_out: |
1704 | redirty_page_for_writepage(wbc, page); |
1705 | return AOP_WRITEPAGE_ACTIVATE; |
1706 | } |
1707 | |
1708 | int f2fs_move_node_page(struct page *node_page, int gc_type) |
1709 | { |
1710 | int err = 0; |
1711 | |
1712 | if (gc_type == FG_GC) { |
1713 | struct writeback_control wbc = { |
1714 | .sync_mode = WB_SYNC_ALL, |
1715 | .nr_to_write = 1, |
1716 | .for_reclaim = 0, |
1717 | }; |
1718 | |
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1720 | |
1721 | set_page_dirty(node_page); |
1722 | |
		if (!clear_page_dirty_for_io(node_page)) {
1724 | err = -EAGAIN; |
1725 | goto out_page; |
1726 | } |
1727 | |
		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
1732 | } |
1733 | goto release_page; |
1734 | } else { |
1735 | /* set page dirty and write it */ |
		if (!PageWriteback(node_page))
1737 | set_page_dirty(node_page); |
1738 | } |
1739 | out_page: |
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
1743 | return err; |
1744 | } |
1745 | |
1746 | static int f2fs_write_node_page(struct page *page, |
1747 | struct writeback_control *wbc) |
1748 | { |
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
1751 | } |
1752 | |
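/*
 * Write back all dirty dnodes that belong to @ino for fsync.  In atomic
 * mode the dnode written last (last_page) carries the fsync mark; if some
 * other thread writes it first, the loop retries until the mark lands on
 * disk.  The sequence id of the marked block is returned via @seq_id so
 * the caller can later wait on it with
 * f2fs_wait_on_node_pages_writeback().
 */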
1753 | int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, |
1754 | struct writeback_control *wbc, bool atomic, |
1755 | unsigned int *seq_id) |
1756 | { |
1757 | pgoff_t index; |
1758 | struct folio_batch fbatch; |
1759 | int ret = 0; |
1760 | struct page *last_page = NULL; |
1761 | bool marked = false; |
1762 | nid_t ino = inode->i_ino; |
1763 | int nr_folios; |
1764 | int nwritten = 0; |
1765 | |
1766 | if (atomic) { |
1767 | last_page = last_fsync_dnode(sbi, ino); |
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
1770 | } |
1771 | retry: |
	folio_batch_init(&fbatch);
1773 | index = 0; |
1774 | |
	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
1778 | int i; |
1779 | |
1780 | for (i = 0; i < nr_folios; i++) { |
1781 | struct page *page = &fbatch.folios[i]->page; |
1782 | bool submitted = false; |
1783 | |
1784 | if (unlikely(f2fs_cp_error(sbi))) { |
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
1787 | ret = -EIO; |
1788 | goto out; |
1789 | } |
1790 | |
			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
1794 | continue; |
1795 | |
1796 | lock_page(page); |
1797 | |
1798 | if (unlikely(page->mapping != NODE_MAPPING(sbi))) { |
1799 | continue_unlock: |
1800 | unlock_page(page); |
1801 | continue; |
1802 | } |
			if (ino_of_node(page) != ino)
1804 | goto continue_unlock; |
1805 | |
1806 | if (!PageDirty(page) && page != last_page) { |
1807 | /* someone wrote it for us */ |
1808 | goto continue_unlock; |
1809 | } |
1810 | |
			f2fs_wait_on_page_writeback(page, NODE, true, true);
1812 | |
1813 | set_fsync_mark(page, 0); |
1814 | set_dentry_mark(page, 0); |
1815 | |
1816 | if (!atomic || page == last_page) { |
1817 | set_fsync_mark(page, 1); |
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
1823 | set_dentry_mark(page, |
1824 | f2fs_need_dentry_mark(sbi, ino)); |
1825 | } |
1826 | /* may be written by other thread */ |
1827 | if (!PageDirty(page)) |
1828 | set_page_dirty(page); |
1829 | } |
1830 | |
1831 | if (!clear_page_dirty_for_io(page)) |
1832 | goto continue_unlock; |
1833 | |
			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
1838 | if (ret) { |
1839 | unlock_page(page); |
				f2fs_put_page(last_page, 0);
1841 | break; |
1842 | } else if (submitted) { |
1843 | nwritten++; |
1844 | } |
1845 | |
1846 | if (page == last_page) { |
				f2fs_put_page(page, 0);
1848 | marked = true; |
1849 | break; |
1850 | } |
1851 | } |
		folio_batch_release(&fbatch);
1853 | cond_resched(); |
1854 | |
1855 | if (ret || marked) |
1856 | break; |
1857 | } |
1858 | if (!ret && atomic && !marked) { |
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
1865 | goto retry; |
1866 | } |
1867 | out: |
1868 | if (nwritten) |
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1870 | return ret ? -EIO : 0; |
1871 | } |
1872 | |
1873 | static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data) |
1874 | { |
1875 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1876 | bool clean; |
1877 | |
1878 | if (inode->i_ino != ino) |
1879 | return 0; |
1880 | |
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1882 | return 0; |
1883 | |
	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1887 | |
1888 | if (clean) |
1889 | return 0; |
1890 | |
1891 | inode = igrab(inode); |
1892 | if (!inode) |
1893 | return 0; |
1894 | return 1; |
1895 | } |
1896 | |
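/*
 * If the inode backing @page is still dirty, copy its in-memory state into
 * the node page and drop the reference taken via f2fs_match_ino().
 * Returns true when the page was updated (and unlocked) here.
 */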
1897 | static bool flush_dirty_inode(struct page *page) |
1898 | { |
1899 | struct f2fs_sb_info *sbi = F2FS_P_SB(page); |
1900 | struct inode *inode; |
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1904 | if (!inode) |
1905 | return false; |
1906 | |
	f2fs_update_inode(inode, page);
1908 | unlock_page(page); |
1909 | |
1910 | iput(inode); |
1911 | return true; |
1912 | } |
1913 | |
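/*
 * Walk all dirty inode pages and write back any inline data still cached
 * in them, so no inline payload is left only in the node page cache.
 */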
1914 | void f2fs_flush_inline_data(struct f2fs_sb_info *sbi) |
1915 | { |
1916 | pgoff_t index = 0; |
1917 | struct folio_batch fbatch; |
1918 | int nr_folios; |
1919 | |
	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
1925 | int i; |
1926 | |
1927 | for (i = 0; i < nr_folios; i++) { |
1928 | struct page *page = &fbatch.folios[i]->page; |
1929 | |
1930 | if (!IS_INODE(page)) |
1931 | continue; |
1932 | |
1933 | lock_page(page); |
1934 | |
1935 | if (unlikely(page->mapping != NODE_MAPPING(sbi))) { |
1936 | continue_unlock: |
1937 | unlock_page(page); |
1938 | continue; |
1939 | } |
1940 | |
1941 | if (!PageDirty(page)) { |
1942 | /* someone wrote it for us */ |
1943 | goto continue_unlock; |
1944 | } |
1945 | |
			/* flush inline_data, if this is an async context */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
1956 | cond_resched(); |
1957 | } |
1958 | } |
1959 | |
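/*
 * Write back dirty node pages in three passes (indirect nodes, dentry
 * dnodes, file dnodes) so nodes that reference other nodes are flushed in
 * a checkpoint-friendly order.  WB_SYNC_NONE callers back off early when
 * a WB_SYNC_ALL writer is pending.
 */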
1960 | int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, |
1961 | struct writeback_control *wbc, |
1962 | bool do_balance, enum iostat_type io_type) |
1963 | { |
1964 | pgoff_t index; |
1965 | struct folio_batch fbatch; |
1966 | int step = 0; |
1967 | int nwritten = 0; |
1968 | int ret = 0; |
1969 | int nr_folios, done = 0; |
1970 | |
	folio_batch_init(&fbatch);
1972 | |
1973 | next_step: |
1974 | index = 0; |
1975 | |
	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
1979 | int i; |
1980 | |
1981 | for (i = 0; i < nr_folios; i++) { |
1982 | struct page *page = &fbatch.folios[i]->page; |
1983 | bool submitted = false; |
1984 | |
1985 | /* give a priority to WB_SYNC threads */ |
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1987 | wbc->sync_mode == WB_SYNC_NONE) { |
1988 | done = 1; |
1989 | break; |
1990 | } |
1991 | |
1992 | /* |
1993 | * flushing sequence with step: |
1994 | * 0. indirect nodes |
1995 | * 1. dentry dnodes |
1996 | * 2. file dnodes |
1997 | */ |
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
2006 | lock_node: |
2007 | if (wbc->sync_mode == WB_SYNC_ALL) |
2008 | lock_page(page); |
2009 | else if (!trylock_page(page)) |
2010 | continue; |
2011 | |
2012 | if (unlikely(page->mapping != NODE_MAPPING(sbi))) { |
2013 | continue_unlock: |
2014 | unlock_page(page); |
2015 | continue; |
2016 | } |
2017 | |
2018 | if (!PageDirty(page)) { |
2019 | /* someone wrote it for us */ |
2020 | goto continue_unlock; |
2021 | } |
2022 | |
			/* flush inline_data/inode, if this is an async context */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && flush_dirty_inode(page))
				goto lock_node;
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
2049 | if (ret) |
2050 | unlock_page(page); |
2051 | else if (submitted) |
2052 | nwritten++; |
2053 | |
2054 | if (--wbc->nr_to_write == 0) |
2055 | break; |
2056 | } |
		folio_batch_release(&fbatch);
2058 | cond_resched(); |
2059 | |
2060 | if (wbc->nr_to_write == 0) { |
2061 | step = 2; |
2062 | break; |
2063 | } |
2064 | } |
2065 | |
2066 | if (step < 2) { |
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2068 | wbc->sync_mode == WB_SYNC_NONE && step == 1) |
2069 | goto out; |
2070 | step++; |
2071 | goto next_step; |
2072 | } |
2073 | out: |
2074 | if (nwritten) |
		f2fs_submit_merged_write(sbi, NODE);
2076 | |
2077 | if (unlikely(f2fs_cp_error(sbi))) |
2078 | return -EIO; |
2079 | return ret; |
2080 | } |
2081 | |
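/*
 * Wait until every fsync node entry whose sequence id is not greater than
 * @seq_id has finished writeback.  A caller pairs this with
 * f2fs_fsync_node_pages(), roughly (sketch):
 *
 *	err = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 *	if (!err)
 *		err = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 */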
2082 | int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, |
2083 | unsigned int seq_id) |
2084 | { |
2085 | struct fsync_node_entry *fn; |
2086 | struct page *page; |
2087 | struct list_head *head = &sbi->fsync_node_list; |
2088 | unsigned long flags; |
2089 | unsigned int cur_seq_id = 0; |
2090 | |
2091 | while (seq_id && cur_seq_id < seq_id) { |
2092 | spin_lock_irqsave(&sbi->fsync_node_lock, flags); |
2093 | if (list_empty(head)) { |
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
2108 | |
2109 | put_page(page); |
2110 | } |
2111 | |
	return filemap_check_errors(NODE_MAPPING(sbi));
2113 | } |
2114 | |
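/*
 * ->writepages for the node address space: skip small background batches,
 * serialize against concurrent WB_SYNC_ALL writers via wb_sync_req, and
 * push dirty node pages through f2fs_sync_node_pages() under a block plug.
 */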
2115 | static int f2fs_write_node_pages(struct address_space *mapping, |
2116 | struct writeback_control *wbc) |
2117 | { |
2118 | struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); |
2119 | struct blk_plug plug; |
2120 | long diff; |
2121 | |
2122 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
2123 | goto skip_write; |
2124 | |
2125 | /* balancing f2fs's metadata in background */ |
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
2158 | return 0; |
2159 | } |
2160 | |
2161 | static bool f2fs_dirty_node_folio(struct address_space *mapping, |
2162 | struct folio *folio) |
2163 | { |
	trace_f2fs_set_page_dirty(&folio->page, NODE);
2165 | |
2166 | if (!folio_test_uptodate(folio)) |
2167 | folio_mark_uptodate(folio); |
2168 | #ifdef CONFIG_F2FS_CHECK_FS |
	if (IS_INODE(&folio->page))
		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2174 | set_page_private_reference(&folio->page); |
2175 | return true; |
2176 | } |
2177 | return false; |
2178 | } |
2179 | |
2180 | /* |
2181 | * Structure of the f2fs node operations |
2182 | */ |
2183 | const struct address_space_operations f2fs_node_aops = { |
2184 | .writepage = f2fs_write_node_page, |
2185 | .writepages = f2fs_write_node_pages, |
2186 | .dirty_folio = f2fs_dirty_node_folio, |
2187 | .invalidate_folio = f2fs_invalidate_folio, |
2188 | .release_folio = f2fs_release_folio, |
2189 | .migrate_folio = filemap_migrate_folio, |
2190 | }; |
2191 | |
2192 | static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, |
2193 | nid_t n) |
2194 | { |
2195 | return radix_tree_lookup(&nm_i->free_nid_root, n); |
2196 | } |
2197 | |
2198 | static int __insert_free_nid(struct f2fs_sb_info *sbi, |
2199 | struct free_nid *i) |
2200 | { |
2201 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2203 | |
2204 | if (err) |
2205 | return err; |
2206 | |
2207 | nm_i->nid_cnt[FREE_NID]++; |
	list_add_tail(&i->list, &nm_i->free_nid_list);
2209 | return 0; |
2210 | } |
2211 | |
2212 | static void __remove_free_nid(struct f2fs_sb_info *sbi, |
2213 | struct free_nid *i, enum nid_state state) |
2214 | { |
2215 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2216 | |
2217 | f2fs_bug_on(sbi, state != i->state); |
2218 | nm_i->nid_cnt[state]--; |
2219 | if (state == FREE_NID) |
		list_del(&i->list);
2221 | radix_tree_delete(&nm_i->free_nid_root, i->nid); |
2222 | } |
2223 | |
2224 | static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, |
2225 | enum nid_state org_state, enum nid_state dst_state) |
2226 | { |
2227 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2228 | |
2229 | f2fs_bug_on(sbi, org_state != i->state); |
2230 | i->state = dst_state; |
2231 | nm_i->nid_cnt[org_state]--; |
2232 | nm_i->nid_cnt[dst_state]++; |
2233 | |
2234 | switch (dst_state) { |
2235 | case PREALLOC_NID: |
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
2240 | break; |
2241 | default: |
2242 | BUG_ON(1); |
2243 | } |
2244 | } |
2245 | |
2246 | bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi) |
2247 | { |
2248 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2249 | unsigned int i; |
2250 | bool ret = true; |
2251 | |
	f2fs_down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2255 | ret = false; |
2256 | break; |
2257 | } |
2258 | } |
	f2fs_up_read(&nm_i->nat_tree_lock);
2260 | |
2261 | return ret; |
2262 | } |
2263 | |
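/*
 * Track @nid's free/allocated state in the per-NAT-block free nid bitmap.
 * Called with nid_list_lock held; it is a no-op until the owning NAT
 * block has been scanned (its nat_block_bitmap bit is set).
 */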
2264 | static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, |
2265 | bool set, bool build) |
2266 | { |
2267 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2268 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); |
2269 | unsigned int nid_ofs = nid - START_NID(nid); |
2270 | |
	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2272 | return; |
2273 | |
2274 | if (set) { |
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2283 | if (!build) |
2284 | nm_i->free_nid_count[nat_ofs]--; |
2285 | } |
2286 | } |
2287 | |
2288 | /* return if the nid is recognized as free */ |
2289 | static bool add_free_nid(struct f2fs_sb_info *sbi, |
2290 | nid_t nid, bool build, bool update) |
2291 | { |
2292 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2293 | struct free_nid *i, *e; |
2294 | struct nat_entry *ne; |
2295 | int err = -EINVAL; |
2296 | bool ret = false; |
2297 | |
2298 | /* 0 nid should not be used */ |
2299 | if (unlikely(nid == 0)) |
2300 | return false; |
2301 | |
2302 | if (unlikely(f2fs_check_nid_range(sbi, nid))) |
2303 | return false; |
2304 | |
	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2306 | i->nid = nid; |
2307 | i->state = FREE_NID; |
2308 | |
2309 | radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); |
2310 | |
	spin_lock(&nm_i->nid_list_lock);
2312 | |
2313 | if (build) { |
2314 | /* |
2315 | * Thread A Thread B |
2316 | * - f2fs_create |
2317 | * - f2fs_new_inode |
2318 | * - f2fs_alloc_nid |
2319 | * - __insert_nid_to_list(PREALLOC_NID) |
2320 | * - f2fs_balance_fs_bg |
2321 | * - f2fs_build_free_nids |
2322 | * - __f2fs_build_free_nids |
2323 | * - scan_nat_page |
2324 | * - add_free_nid |
2325 | * - __lookup_nat_cache |
2326 | * - f2fs_add_link |
2327 | * - f2fs_init_inode_metadata |
2328 | * - f2fs_new_inode_page |
2329 | * - f2fs_new_node_page |
2330 | * - set_node_addr |
2331 | * - f2fs_alloc_nid_done |
2332 | * - __remove_nid_from_list(PREALLOC_NID) |
2333 | * - __insert_nid_to_list(FREE_NID) |
2334 | */ |
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2337 | nat_get_blkaddr(ne) != NULL_ADDR)) |
2338 | goto err_out; |
2339 | |
		e = __lookup_free_nid_list(nm_i, nid);
2341 | if (e) { |
2342 | if (e->state == FREE_NID) |
2343 | ret = true; |
2344 | goto err_out; |
2345 | } |
2346 | } |
2347 | ret = true; |
2348 | err = __insert_free_nid(sbi, i); |
2349 | err_out: |
2350 | if (update) { |
		update_free_nid_bitmap(sbi, nid, ret, build);
2352 | if (!build) |
2353 | nm_i->available_nids++; |
2354 | } |
	spin_unlock(&nm_i->nid_list_lock);
2356 | radix_tree_preload_end(); |
2357 | |
2358 | if (err) |
		kmem_cache_free(free_nid_slab, i);
2360 | return ret; |
2361 | } |
2362 | |
2363 | static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) |
2364 | { |
2365 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2366 | struct free_nid *i; |
2367 | bool need_free = false; |
2368 | |
	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
2373 | need_free = true; |
2374 | } |
	spin_unlock(&nm_i->nid_list_lock);
2376 | |
2377 | if (need_free) |
		kmem_cache_free(free_nid_slab, i);
2379 | } |
2380 | |
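/*
 * Scan one NAT block and record every nid whose block address is
 * NULL_ADDR as free.  A NEW_ADDR entry here means the NAT is
 * inconsistent, so -EFSCORRUPTED is returned to the caller.
 */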
2381 | static int scan_nat_page(struct f2fs_sb_info *sbi, |
2382 | struct page *nat_page, nid_t start_nid) |
2383 | { |
2384 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2385 | struct f2fs_nat_block *nat_blk = page_address(nat_page); |
2386 | block_t blk_addr; |
2387 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); |
2388 | int i; |
2389 | |
	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2391 | |
2392 | i = start_nid % NAT_ENTRY_PER_BLOCK; |
2393 | |
2394 | for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { |
2395 | if (unlikely(start_nid >= nm_i->max_nid)) |
2396 | break; |
2397 | |
2398 | blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); |
2399 | |
2400 | if (blk_addr == NEW_ADDR) |
2401 | return -EFSCORRUPTED; |
2402 | |
2403 | if (blk_addr == NULL_ADDR) { |
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
2409 | } |
2410 | } |
2411 | |
2412 | return 0; |
2413 | } |
2414 | |
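/*
 * Pick up nat entries still sitting in the current hot data summary
 * journal: entries with NULL_ADDR are free nids, anything else must be
 * dropped from the free nid list.
 */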
2415 | static void scan_curseg_cache(struct f2fs_sb_info *sbi) |
2416 | { |
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2418 | struct f2fs_journal *journal = curseg->journal; |
2419 | int i; |
2420 | |
	down_read(&curseg->journal_rwsem);
2422 | for (i = 0; i < nats_in_cursum(journal); i++) { |
2423 | block_t addr; |
2424 | nid_t nid; |
2425 | |
2426 | addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); |
2427 | nid = le32_to_cpu(nid_in_journal(journal, i)); |
2428 | if (addr == NULL_ADDR) |
			add_free_nid(sbi, nid, true, false);
2430 | else |
2431 | remove_free_nid(sbi, nid); |
2432 | } |
	up_read(&curseg->journal_rwsem);
2434 | } |
2435 | |
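/*
 * Fast path of free nid building: harvest free nids from the in-memory
 * free_nid_bitmap of already-scanned NAT blocks, then merge in the curseg
 * journal, without touching NAT pages on disk.
 */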
2436 | static void scan_free_nid_bits(struct f2fs_sb_info *sbi) |
2437 | { |
2438 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2439 | unsigned int i, idx; |
2440 | nid_t nid; |
2441 | |
	f2fs_down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2446 | continue; |
2447 | if (!nm_i->free_nid_count[i]) |
2448 | continue; |
2449 | for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { |
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
2452 | if (idx >= NAT_ENTRY_PER_BLOCK) |
2453 | break; |
2454 | |
2455 | nid = i * NAT_ENTRY_PER_BLOCK + idx; |
			add_free_nid(sbi, nid, true, false);
2457 | |
2458 | if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) |
2459 | goto out; |
2460 | } |
2461 | } |
2462 | out: |
2463 | scan_curseg_cache(sbi); |
2464 | |
	f2fs_up_read(&nm_i->nat_tree_lock);
2466 | } |
2467 | |
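/*
 * Refill the free nid list up to roughly one NAT block worth of entries.
 * Unscanned NAT pages are read (with readahead) starting at
 * next_scan_nid, and the curseg journal is consulted last so its entries
 * take precedence.
 */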
2468 | static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, |
2469 | bool sync, bool mount) |
2470 | { |
2471 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2472 | int i = 0, ret; |
2473 | nid_t nid = nm_i->next_scan_nid; |
2474 | |
2475 | if (unlikely(nid >= nm_i->max_nid)) |
2476 | nid = 0; |
2477 | |
2478 | if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) |
2479 | nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; |
2480 | |
2481 | /* Enough entries */ |
2482 | if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) |
2483 | return 0; |
2484 | |
	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2486 | return 0; |
2487 | |
2488 | if (!mount) { |
2489 | /* try to find free nids in free_nid_bitmap */ |
2490 | scan_free_nid_bits(sbi); |
2491 | |
2492 | if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) |
2493 | return 0; |
2494 | } |
2495 | |
2496 | /* readahead nat pages to be scanned */ |
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);
2499 | |
	f2fs_down_read(&nm_i->nat_tree_lock);
2501 | |
2502 | while (1) { |
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
				nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}
2512 | } |
2513 | |
2514 | if (ret) { |
				f2fs_up_read(&nm_i->nat_tree_lock);

				if (ret == -EFSCORRUPTED) {
					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
					set_sbi_flag(sbi, SBI_NEED_FSCK);
					f2fs_handle_error(sbi,
							ERROR_INCONSISTENT_NAT);
				}
2522 | } |
2523 | |
2524 | return ret; |
2525 | } |
2526 | } |
2527 | |
2528 | nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); |
2529 | if (unlikely(nid >= nm_i->max_nid)) |
2530 | nid = 0; |
2531 | |
2532 | if (++i >= FREE_NID_PAGES) |
2533 | break; |
2534 | } |
2535 | |
	/* go to the next free nat pages to find more free nids */
2537 | nm_i->next_scan_nid = nid; |
2538 | |
2539 | /* find free nids from current sum_pages */ |
2540 | scan_curseg_cache(sbi); |
2541 | |
	f2fs_up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
2546 | |
2547 | return 0; |
2548 | } |
2549 | |
2550 | int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) |
2551 | { |
2552 | int ret; |
2553 | |
2554 | mutex_lock(&NM_I(sbi)->build_lock); |
2555 | ret = __f2fs_build_free_nids(sbi, sync, mount); |
	mutex_unlock(&NM_I(sbi)->build_lock);
2557 | |
2558 | return ret; |
2559 | } |
2560 | |
2561 | /* |
2562 | * If this function returns success, caller can obtain a new nid |
2563 | * from second parameter of this function. |
2564 | * The returned nid could be used ino as well as nid when inode is created. |
2565 | */ |
2566 | bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) |
2567 | { |
2568 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2569 | struct free_nid *i = NULL; |
2570 | retry: |
2571 | if (time_to_inject(sbi, FAULT_ALLOC_NID)) |
2572 | return false; |
2573 | |
	spin_lock(&nm_i->nid_list_lock);
2575 | |
2576 | if (unlikely(nm_i->available_nids == 0)) { |
		spin_unlock(&nm_i->nid_list_lock);
2578 | return false; |
2579 | } |
2580 | |
2581 | /* We should not use stale free nids created by f2fs_build_free_nids */ |
2582 | if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) { |
2583 | f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); |
2584 | i = list_first_entry(&nm_i->free_nid_list, |
2585 | struct free_nid, list); |
2586 | *nid = i->nid; |
2587 | |
		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
2594 | return true; |
2595 | } |
	spin_unlock(&nm_i->nid_list_lock);
2597 | |
	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
2600 | goto retry; |
2601 | return false; |
2602 | } |
2603 | |
2604 | /* |
2605 | * f2fs_alloc_nid() should be called prior to this function. |
2606 | */ |
2607 | void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) |
2608 | { |
2609 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2610 | struct free_nid *i; |
2611 | |
	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
2619 | } |
2620 | |
2621 | /* |
2622 | * f2fs_alloc_nid() should be called prior to this function. |
2623 | */ |
2624 | void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) |
2625 | { |
2626 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2627 | struct free_nid *i; |
2628 | bool need_free = false; |
2629 | |
2630 | if (!nid) |
2631 | return; |
2632 | |
	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);
2649 | |
2650 | if (need_free) |
		kmem_cache_free(free_nid_slab, i);
2652 | } |
2653 | |
2654 | int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) |
2655 | { |
2656 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2657 | int nr = nr_shrink; |
2658 | |
2659 | if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) |
2660 | return 0; |
2661 | |
	if (!mutex_trylock(&nm_i->build_lock))
2663 | return 0; |
2664 | |
2665 | while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { |
2666 | struct free_nid *i, *next; |
2667 | unsigned int batch = SHRINK_NID_BATCH_SIZE; |
2668 | |
		spin_lock(&nm_i->nid_list_lock);
2670 | list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { |
2671 | if (!nr_shrink || !batch || |
2672 | nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) |
2673 | break; |
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
2676 | nr_shrink--; |
2677 | batch--; |
2678 | } |
		spin_unlock(&nm_i->nid_list_lock);
2680 | } |
2681 | |
	mutex_unlock(&nm_i->build_lock);
2683 | |
2684 | return nr - nr_shrink; |
2685 | } |
2686 | |
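/*
 * During roll-forward recovery, make the inline xattr state of the
 * in-memory inode match the recovered node page @page and copy the inline
 * xattr payload into the inode page.
 */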
2687 | int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) |
2688 | { |
2689 | void *src_addr, *dst_addr; |
2690 | size_t inline_size; |
2691 | struct page *ipage; |
2692 | struct f2fs_inode *ri; |
2693 | |
	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);
2697 | |
2698 | ri = F2FS_INODE(page); |
2699 | if (ri->i_inline & F2FS_INLINE_XATTR) { |
2700 | if (!f2fs_has_inline_xattr(inode)) { |
			set_inode_flag(inode, FI_INLINE_XATTR);
2702 | stat_inc_inline_xattr(inode); |
2703 | } |
2704 | } else { |
2705 | if (f2fs_has_inline_xattr(inode)) { |
2706 | stat_dec_inline_xattr(inode); |
			clear_inode_flag(inode, FI_INLINE_XATTR);
2708 | } |
2709 | goto update_inode; |
2710 | } |
2711 | |
	dst_addr = inline_xattr_addr(inode, ipage);
2713 | src_addr = inline_xattr_addr(inode, page); |
2714 | inline_size = inline_xattr_size(inode); |
2715 | |
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2717 | memcpy(dst_addr, src_addr, inline_size); |
2718 | update_inode: |
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
2721 | return 0; |
2722 | } |
2723 | |
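/*
 * Rebuild the xattr node during recovery: invalidate the old xattr block
 * (if any), allocate a fresh nid for a new xattr node page, and copy the
 * recovered xattr payload into it.
 */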
2724 | int f2fs_recover_xattr_data(struct inode *inode, struct page *page) |
2725 | { |
2726 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
2727 | nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; |
2728 | nid_t new_xnid; |
2729 | struct dnode_of_data dn; |
2730 | struct node_info ni; |
2731 | struct page *xpage; |
2732 | int err; |
2733 | |
2734 | if (!prev_xnid) |
2735 | goto recover_xnid; |
2736 | |
2737 | /* 1: invalidate the previous xattr nid */ |
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2739 | if (err) |
2740 | return err; |
2741 | |
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);
2745 | |
2746 | recover_xnid: |
2747 | /* 2: update xattr nid in inode */ |
	if (!f2fs_alloc_nid(sbi, &new_xnid))
2749 | return -ENOSPC; |
2750 | |
	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
2756 | } |
2757 | |
	f2fs_alloc_nid_done(sbi, new_xnid);
2759 | f2fs_update_inode_page(inode); |
2760 | |
2761 | /* 3: update and set xattr node page dirty */ |
2762 | if (page) { |
2763 | memcpy(F2FS_NODE(xpage), F2FS_NODE(page), |
2764 | VALID_XATTR_BLOCK_SIZE); |
2765 | set_page_dirty(xpage); |
2766 | } |
	f2fs_put_page(xpage, 1);
2768 | |
2769 | return 0; |
2770 | } |
2771 | |
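/*
 * Re-create an inode page from the fsynced node page @page during
 * recovery.  Only the stable head of the on-disk inode is copied; size,
 * links and the xattr nid are reset so later recovery steps can rebuild
 * them.
 */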
2772 | int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) |
2773 | { |
2774 | struct f2fs_inode *src, *dst; |
	nid_t ino = ino_of_node(page);
2776 | struct node_info old_ni, new_ni; |
2777 | struct page *ipage; |
2778 | int err; |
2779 | |
	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2781 | if (err) |
2782 | return err; |
2783 | |
2784 | if (unlikely(old_ni.blk_addr != NULL_ADDR)) |
2785 | return -EINVAL; |
2786 | retry: |
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2788 | if (!ipage) { |
2789 | memalloc_retry_wait(GFP_NOFS); |
2790 | goto retry; |
2791 | } |
2792 | |
2793 | /* Should not use this inode from free nid list */ |
	remove_free_nid(sbi, ino);
2795 | |
	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);
2800 | |
2801 | src = F2FS_INODE(page); |
	dst = F2FS_INODE(ipage);
2803 | |
2804 | memcpy(dst, src, offsetof(struct f2fs_inode, i_ext)); |
2805 | dst->i_size = 0; |
2806 | dst->i_blocks = cpu_to_le64(1); |
2807 | dst->i_links = cpu_to_le32(1); |
2808 | dst->i_xattr_nid = 0; |
2809 | dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); |
2810 | if (dst->i_inline & F2FS_EXTRA_ATTR) { |
2811 | dst->i_extra_isize = src->i_extra_isize; |
2812 | |
2813 | if (f2fs_sb_has_flexible_inline_xattr(sbi) && |
2814 | F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), |
2815 | i_inline_xattr_size)) |
2816 | dst->i_inline_xattr_size = src->i_inline_xattr_size; |
2817 | |
2818 | if (f2fs_sb_has_project_quota(sbi) && |
2819 | F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), |
2820 | i_projid)) |
2821 | dst->i_projid = src->i_projid; |
2822 | |
2823 | if (f2fs_sb_has_inode_crtime(sbi) && |
2824 | F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), |
2825 | i_crtime_nsec)) { |
2826 | dst->i_crtime = src->i_crtime; |
2827 | dst->i_crtime_nsec = src->i_crtime_nsec; |
2828 | } |
2829 | } |
2830 | |
2831 | new_ni = old_ni; |
2832 | new_ni.ino = ino; |
2833 | |
2834 | if (unlikely(inc_valid_node_count(sbi, NULL, true))) |
2835 | WARN_ON(1); |
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2837 | inc_valid_inode_count(sbi); |
2838 | set_page_dirty(ipage); |
	f2fs_put_page(ipage, 1);
2840 | return 0; |
2841 | } |
2842 | |
2843 | int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, |
2844 | unsigned int segno, struct f2fs_summary_block *sum) |
2845 | { |
2846 | struct f2fs_node *rn; |
2847 | struct f2fs_summary *sum_entry; |
2848 | block_t addr; |
2849 | int i, idx, last_offset, nrpages; |
2850 | |
2851 | /* scan the node segment */ |
2852 | last_offset = BLKS_PER_SEG(sbi); |
2853 | addr = START_BLOCK(sbi, segno); |
2854 | sum_entry = &sum->entries[0]; |
2855 | |
2856 | for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { |
		nrpages = bio_max_segs(last_offset - i);
2858 | |
2859 | /* readahead node pages */ |
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2861 | |
2862 | for (idx = addr; idx < addr + nrpages; idx++) { |
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);
2867 | |
2868 | rn = F2FS_NODE(page); |
2869 | sum_entry->nid = rn->footer.nid; |
2870 | sum_entry->version = 0; |
2871 | sum_entry->ofs_in_node = 0; |
2872 | sum_entry++; |
			f2fs_put_page(page, 1);
2874 | } |
2875 | |
		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
2878 | } |
2879 | return 0; |
2880 | } |
2881 | |
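/*
 * Drain all nat entries from the curseg journal into the in-memory nat
 * cache and mark them dirty, so the upcoming checkpoint writes them back
 * through NAT pages (or re-journals them) in one place.
 */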
2882 | static void remove_nats_in_journal(struct f2fs_sb_info *sbi) |
2883 | { |
2884 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2886 | struct f2fs_journal *journal = curseg->journal; |
2887 | int i; |
2888 | |
	down_write(&curseg->journal_rwsem);
2890 | for (i = 0; i < nats_in_cursum(journal); i++) { |
2891 | struct nat_entry *ne; |
2892 | struct f2fs_nat_entry raw_ne; |
2893 | nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); |
2894 | |
2895 | if (f2fs_check_nid_range(sbi, nid)) |
2896 | continue; |
2897 | |
2898 | raw_ne = nat_in_journal(journal, i); |
2899 | |
		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
2904 | } |
2905 | |
2906 | /* |
2907 | * if a free nat in journal has not been used after last |
2908 | * checkpoint, we should remove it from available nids, |
2909 | * since later we will add it again. |
2910 | */ |
		if (!get_nat_flag(ne, IS_DIRTY) &&
2912 | le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { |
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
2916 | } |
2917 | |
2918 | __set_nat_cache_dirty(nm_i, ne); |
2919 | } |
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
2922 | } |
2923 | |
2924 | static void __adjust_nat_entry_set(struct nat_entry_set *nes, |
2925 | struct list_head *head, int max) |
2926 | { |
2927 | struct nat_entry_set *cur; |
2928 | |
2929 | if (nes->entry_cnt >= max) |
2930 | goto add_out; |
2931 | |
2932 | list_for_each_entry(cur, head, set_list) { |
2933 | if (cur->entry_cnt >= nes->entry_cnt) { |
			list_add(&nes->set_list, cur->set_list.prev);
2935 | return; |
2936 | } |
2937 | } |
2938 | add_out: |
	list_add_tail(&nes->set_list, head);
2940 | } |
2941 | |
2942 | static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs, |
2943 | unsigned int valid) |
2944 | { |
2945 | if (valid == 0) { |
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2956 | } |
2957 | |
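/*
 * Recompute the full/empty nat_bits for the NAT block starting at
 * @start_nid after it has been rewritten, counting nid 0 in the first
 * block as always valid since it must never be used.
 */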
2958 | static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, |
2959 | struct page *page) |
2960 | { |
2961 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2962 | unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; |
2963 | struct f2fs_nat_block *nat_blk = page_address(page); |
2964 | int valid = 0; |
2965 | int i = 0; |
2966 | |
2967 | if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) |
2968 | return; |
2969 | |
2970 | if (nat_index == 0) { |
2971 | valid = 1; |
2972 | i = 1; |
2973 | } |
2974 | for (; i < NAT_ENTRY_PER_BLOCK; i++) { |
2975 | if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) |
2976 | valid++; |
2977 | } |
2978 | |
2979 | __update_nat_bits(nm_i, nat_ofs: nat_index, valid); |
2980 | } |
2981 | |
2982 | void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi) |
2983 | { |
2984 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
2985 | unsigned int nat_ofs; |
2986 | |
	f2fs_down_read(&nm_i->nat_tree_lock);
2988 | |
2989 | for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { |
2990 | unsigned int valid = 0, nid_ofs = 0; |
2991 | |
		/* handle nid zero, since it should never be used */
2993 | if (unlikely(nat_ofs == 0)) { |
2994 | valid = 1; |
2995 | nid_ofs = 1; |
2996 | } |
2997 | |
2998 | for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) { |
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
3001 | valid++; |
3002 | } |
3003 | |
3004 | __update_nat_bits(nm_i, nat_ofs, valid); |
3005 | } |
3006 | |
	f2fs_up_read(&nm_i->nat_tree_lock);
3008 | } |
3009 | |
3010 | static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, |
3011 | struct nat_entry_set *set, struct cp_control *cpc) |
3012 | { |
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3014 | struct f2fs_journal *journal = curseg->journal; |
3015 | nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; |
3016 | bool to_journal = true; |
3017 | struct f2fs_nat_block *nat_blk; |
3018 | struct nat_entry *ne, *cur; |
3019 | struct page *page = NULL; |
3020 | |
3021 | /* |
3022 | * there are two steps to flush nat entries: |
3023 | * #1, flush nat entries to journal in current hot data summary block. |
3024 | * #2, flush nat entries to nat page. |
3025 | */ |
3026 | if ((cpc->reason & CP_UMOUNT) || |
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3028 | to_journal = false; |
3029 | |
3030 | if (to_journal) { |
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);
3036 | |
3037 | nat_blk = page_address(page); |
3038 | f2fs_bug_on(sbi, !nat_blk); |
3039 | } |
3040 | |
3041 | /* flush dirty nats in nat entry set */ |
3042 | list_for_each_entry_safe(ne, cur, &set->entry_list, list) { |
3043 | struct f2fs_nat_entry *raw_ne; |
3044 | nid_t nid = nat_get_nid(ne); |
3045 | int offset; |
3046 | |
3047 | f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); |
3048 | |
3049 | if (to_journal) { |
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
3052 | f2fs_bug_on(sbi, offset < 0); |
3053 | raw_ne = &nat_in_journal(journal, offset); |
3054 | nid_in_journal(journal, offset) = cpu_to_le32(nid); |
3055 | } else { |
3056 | raw_ne = &nat_blk->entries[nid - start_nid]; |
3057 | } |
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
3067 | } |
3068 | } |
3069 | |
3070 | if (to_journal) { |
		up_write(&curseg->journal_rwsem);
3072 | } else { |
3073 | update_nat_bits(sbi, start_nid, page); |
		f2fs_put_page(page, 1);
3075 | } |
3076 | |
3077 | /* Allow dirty nats by node block allocation in write_begin */ |
3078 | if (!set->entry_cnt) { |
3079 | radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); |
		kmem_cache_free(nat_entry_set_slab, set);
3081 | } |
3082 | return 0; |
3083 | } |
3084 | |
3085 | /* |
3086 | * This function is called during the checkpointing process. |
3087 | */ |
3088 | int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) |
3089 | { |
3090 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3091 | struct curseg_info *curseg = CURSEG_I(sbi, type: CURSEG_HOT_DATA); |
3092 | struct f2fs_journal *journal = curseg->journal; |
3093 | struct nat_entry_set *setvec[NAT_VEC_SIZE]; |
3094 | struct nat_entry_set *set, *tmp; |
3095 | unsigned int found; |
3096 | nid_t set_idx = 0; |
3097 | LIST_HEAD(sets); |
3098 | int err = 0; |
3099 | |
3100 | /* |
3101 | * during unmount, let's flush nat_bits before checking |
3102 | * nat_cnt[DIRTY_NAT]. |
3103 | */ |
3104 | if (cpc->reason & CP_UMOUNT) { |
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
3108 | } |
3109 | |
3110 | if (!nm_i->nat_cnt[DIRTY_NAT]) |
3111 | return 0; |
3112 | |
	f2fs_down_write(&nm_i->nat_tree_lock);
3114 | |
3115 | /* |
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
3119 | */ |
3120 | if (cpc->reason & CP_UMOUNT || |
3121 | !__has_cursum_space(journal, |
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3123 | remove_nats_in_journal(sbi); |
3124 | |
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
3127 | unsigned idx; |
3128 | |
3129 | set_idx = setvec[found - 1]->set + 1; |
3130 | for (idx = 0; idx < found; idx++) |
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
3133 | } |
3134 | |
3135 | /* flush dirty nats in nat entry set */ |
3136 | list_for_each_entry_safe(set, tmp, &sets, set_list) { |
3137 | err = __flush_nat_entry_set(sbi, set, cpc); |
3138 | if (err) |
3139 | break; |
3140 | } |
3141 | |
	f2fs_up_write(&nm_i->nat_tree_lock);
3143 | /* Allow dirty nats by node block allocation in write_begin */ |
3144 | |
3145 | return err; |
3146 | } |
3147 | |
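/*
 * Allocate the nat_bits buffer and, when CP_NAT_BITS_FLAG is set, load
 * the full/empty NAT block bitmaps from the tail of the checkpoint area,
 * validating them against the checkpoint version.
 */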
3148 | static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) |
3149 | { |
3150 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
3151 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3152 | unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; |
3153 | unsigned int i; |
	__u64 cp_ver = cur_cp_version(ckpt);
3155 | block_t nat_bits_addr; |
3156 | |
3157 | nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); |
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3160 | if (!nm_i->nat_bits) |
3161 | return -ENOMEM; |
3162 | |
3163 | nm_i->full_nat_bits = nm_i->nat_bits + 8; |
3164 | nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; |
3165 | |
3166 | if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) |
3167 | return 0; |
3168 | |
3169 | nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) - |
3170 | nm_i->nat_bits_blocks; |
3171 | for (i = 0; i < nm_i->nat_bits_blocks; i++) { |
3172 | struct page *page; |
3173 | |
		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);
3177 | |
3178 | memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), |
3179 | page_address(page), F2FS_BLKSIZE); |
		f2fs_put_page(page, 1);
3181 | } |
3182 | |
	cp_ver |= (cur_cp_crc(ckpt) << 32);
3184 | if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { |
3185 | clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); |
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			    cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3188 | return 0; |
3189 | } |
3190 | |
	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3192 | return 0; |
3193 | } |
3194 | |
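/*
 * Seed free_nid_bitmap and nat_block_bitmap from the nat_bits loaded at
 * mount time: empty NAT blocks contribute all of their nids as free,
 * while full blocks are marked scanned with no free nids, so neither
 * needs an on-disk scan later.
 */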
3195 | static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) |
3196 | { |
3197 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3198 | unsigned int i = 0; |
3199 | nid_t nid, last_nid; |
3200 | |
3201 | if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) |
3202 | return; |
3203 | |
3204 | for (i = 0; i < nm_i->nat_blocks; i++) { |
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3206 | if (i >= nm_i->nat_blocks) |
3207 | break; |
3208 | |
		__set_bit_le(i, nm_i->nat_block_bitmap);
3210 | |
3211 | nid = i * NAT_ENTRY_PER_BLOCK; |
3212 | last_nid = nid + NAT_ENTRY_PER_BLOCK; |
3213 | |
		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
3218 | } |
3219 | |
3220 | for (i = 0; i < nm_i->nat_blocks; i++) { |
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3222 | if (i >= nm_i->nat_blocks) |
3223 | break; |
3224 | |
		__set_bit_le(i, nm_i->nat_block_bitmap);
3226 | } |
3227 | } |
3228 | |
3229 | static int init_node_manager(struct f2fs_sb_info *sbi) |
3230 | { |
3231 | struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); |
3232 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3233 | unsigned char *version_bitmap; |
3234 | unsigned int nat_segs; |
3235 | int err; |
3236 | |
3237 | nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); |
3238 | |
3239 | /* segment_count_nat includes pair segment so divide to 2. */ |
3240 | nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; |
3241 | nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); |
3242 | nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; |
3243 | |
3244 | /* not used nids: 0, node, meta, (and root counted as valid node) */ |
3245 | nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - |
3246 | F2FS_RESERVED_NODE_NUM; |
3247 | nm_i->nid_cnt[FREE_NID] = 0; |
3248 | nm_i->nid_cnt[PREALLOC_NID] = 0; |
3249 | nm_i->ram_thresh = DEF_RAM_THRESHOLD; |
3250 | nm_i->ra_nid_pages = DEF_RA_NID_PAGES; |
3251 | nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; |
3252 | nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS; |
3253 | |
3254 | INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); |
	INIT_LIST_HEAD(&nm_i->free_nid_list);
3256 | INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); |
3257 | INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); |
	INIT_LIST_HEAD(&nm_i->nat_entries);
3259 | spin_lock_init(&nm_i->nat_list_lock); |
3260 | |
3261 | mutex_init(&nm_i->build_lock); |
3262 | spin_lock_init(&nm_i->nid_list_lock); |
3263 | init_f2fs_rwsem(&nm_i->nat_tree_lock); |
3264 | |
3265 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); |
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3269 | GFP_KERNEL); |
3270 | if (!nm_i->nat_bitmap) |
3271 | return -ENOMEM; |
3272 | |
3273 | err = __get_nat_bitmaps(sbi); |
3274 | if (err) |
3275 | return err; |
3276 | |
3277 | #ifdef CONFIG_F2FS_CHECK_FS |
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3279 | GFP_KERNEL); |
3280 | if (!nm_i->nat_bitmap_mir) |
3281 | return -ENOMEM; |
3282 | #endif |
3283 | |
3284 | return 0; |
3285 | } |
3286 | |
3287 | static int init_free_nid_cache(struct f2fs_sb_info *sbi) |
3288 | { |
3289 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3290 | int i; |
3291 | |
3292 | nm_i->free_nid_bitmap = |
3293 | f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), |
3294 | nm_i->nat_blocks), |
3295 | GFP_KERNEL); |
3296 | if (!nm_i->free_nid_bitmap) |
3297 | return -ENOMEM; |
3298 | |
3299 | for (i = 0; i < nm_i->nat_blocks; i++) { |
3300 | nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, |
3301 | f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL); |
3302 | if (!nm_i->free_nid_bitmap[i]) |
3303 | return -ENOMEM; |
3304 | } |
3305 | |
	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3307 | GFP_KERNEL); |
3308 | if (!nm_i->nat_block_bitmap) |
3309 | return -ENOMEM; |
3310 | |
3311 | nm_i->free_nid_count = |
3312 | f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short), |
3313 | nm_i->nat_blocks), |
3314 | GFP_KERNEL); |
3315 | if (!nm_i->free_nid_count) |
3316 | return -ENOMEM; |
3317 | return 0; |
3318 | } |
3319 | |
3320 | int f2fs_build_node_manager(struct f2fs_sb_info *sbi) |
3321 | { |
3322 | int err; |
3323 | |
	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3325 | GFP_KERNEL); |
3326 | if (!sbi->nm_info) |
3327 | return -ENOMEM; |
3328 | |
3329 | err = init_node_manager(sbi); |
3330 | if (err) |
3331 | return err; |
3332 | |
3333 | err = init_free_nid_cache(sbi); |
3334 | if (err) |
3335 | return err; |
3336 | |
3337 | /* load free nid status from nat_bits table */ |
3338 | load_free_nid_bitmap(sbi); |
3339 | |
	return f2fs_build_free_nids(sbi, true, true);
3341 | } |
3342 | |
3343 | void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) |
3344 | { |
3345 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
3346 | struct free_nid *i, *next_i; |
3347 | void *vec[NAT_VEC_SIZE]; |
3348 | struct nat_entry **natvec = (struct nat_entry **)vec; |
3349 | struct nat_entry_set **setvec = (struct nat_entry_set **)vec; |
3350 | nid_t nid = 0; |
3351 | unsigned int found; |
3352 | |
3353 | if (!nm_i) |
3354 | return; |
3355 | |
3356 | /* destroy free nid list */ |
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
3363 | } |
3364 | f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); |
3365 | f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); |
3366 | f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); |
	spin_unlock(&nm_i->nid_list_lock);
3368 | |
3369 | /* destroy nat cache */ |
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NAT_VEC_SIZE, natvec))) {
3373 | unsigned idx; |
3374 | |
3375 | nid = nat_get_nid(natvec[found - 1]) + 1; |
3376 | for (idx = 0; idx < found; idx++) { |
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
3382 | } |
3383 | } |
3384 | f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); |
3385 | |
3386 | /* destroy nat set cache */ |
3387 | nid = 0; |
3388 | memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE); |
	while ((found = __gang_lookup_nat_set(nm_i,
			nid, NAT_VEC_SIZE, setvec))) {
3391 | unsigned idx; |
3392 | |
3393 | nid = setvec[found - 1]->set + 1; |
3394 | for (idx = 0; idx < found; idx++) { |
			/* entry_cnt is not zero, when cp_error occurred */
3396 | f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); |
3397 | radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); |
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3399 | } |
3400 | } |
	f2fs_up_write(&nm_i->nat_tree_lock);
3402 | |
	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
3420 | } |
3421 | |
3422 | int __init f2fs_create_node_manager_caches(void) |
3423 | { |
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
3426 | if (!nat_entry_slab) |
3427 | goto fail; |
3428 | |
	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
3431 | if (!free_nid_slab) |
3432 | goto destroy_nat_entry; |
3433 | |
	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
3436 | if (!nat_entry_set_slab) |
3437 | goto destroy_free_nid; |
3438 | |
	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
3441 | if (!fsync_node_entry_slab) |
3442 | goto destroy_nat_entry_set; |
3443 | return 0; |
3444 | |
3445 | destroy_nat_entry_set: |
	kmem_cache_destroy(nat_entry_set_slab);
3447 | destroy_free_nid: |
	kmem_cache_destroy(free_nid_slab);
3449 | destroy_nat_entry: |
	kmem_cache_destroy(nat_entry_slab);
3451 | fail: |
3452 | return -ENOMEM; |
3453 | } |
3454 | |
3455 | void f2fs_destroy_node_manager_caches(void) |
3456 | { |
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
3461 | } |
3462 | |