1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * fs/f2fs/segment.h |
4 | * |
5 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. |
6 | * http://www.samsung.com/ |
7 | */ |
8 | #include <linux/blkdev.h> |
9 | #include <linux/backing-dev.h> |
10 | |
11 | /* constant macro */ |
12 | #define NULL_SEGNO ((unsigned int)(~0)) |
13 | #define NULL_SECNO ((unsigned int)(~0)) |
14 | |
15 | #define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */ |
16 | #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */ |
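/*
 * Assuming the default 2MB segment (512 x 4KB blocks), 4096 prefree segments
 * correspond to the 8GB upper bound noted above.
 */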
17 | |
18 | #define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */ |
19 | #define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */ |
20 | |
21 | /* L: Logical segment # in volume, R: Relative segment # in main area */ |
22 | #define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno) |
23 | #define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno) |
24 | |
25 | #define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA) |
26 | #define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE) |
27 | #define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA)) |
28 | |
29 | static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, |
30 | unsigned short seg_type) |
31 | { |
32 | f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG); |
33 | } |
34 | |
35 | #define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA) |
36 | #define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA) |
37 | #define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA) |
38 | |
39 | #define IS_CURSEG(sbi, seg) \ |
40 | (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \ |
41 | ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \ |
42 | ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \ |
43 | ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \ |
44 | ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \ |
45 | ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \ |
46 | ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \ |
47 | ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno)) |
48 | |
49 | #define IS_CURSEC(sbi, secno) \ |
50 | (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ |
51 | (sbi)->segs_per_sec) || \ |
52 | ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \ |
53 | (sbi)->segs_per_sec) || \ |
54 | ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \ |
55 | (sbi)->segs_per_sec) || \ |
56 | ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \ |
57 | (sbi)->segs_per_sec) || \ |
58 | ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \ |
59 | (sbi)->segs_per_sec) || \ |
60 | ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ |
61 | (sbi)->segs_per_sec) || \ |
62 | ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \ |
63 | (sbi)->segs_per_sec) || \ |
64 | ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \ |
65 | (sbi)->segs_per_sec)) |
66 | |
67 | #define MAIN_BLKADDR(sbi) \ |
68 | (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \ |
69 | le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr)) |
70 | #define SEG0_BLKADDR(sbi) \ |
71 | (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \ |
72 | le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr)) |
73 | |
74 | #define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments) |
75 | #define MAIN_SECS(sbi) ((sbi)->total_sections) |
76 | |
77 | #define TOTAL_SEGS(sbi) \ |
78 | (SM_I(sbi) ? SM_I(sbi)->segment_count : \ |
79 | le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count)) |
80 | #define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg) |
81 | |
82 | #define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi)) |
83 | #define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \ |
84 | (sbi)->log_blocks_per_seg)) |
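/*
 * For the common configuration of 4KB blocks (log_blocksize == 12) and
 * 512 blocks per segment (log_blocks_per_seg == 9), SEGMENT_SIZE() works
 * out to 1ULL << 21, i.e. 2MB per segment.
 */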
85 | |
86 | #define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \ |
87 | (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg)) |
88 | |
89 | #define NEXT_FREE_BLKADDR(sbi, curseg) \ |
90 | (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff) |
91 | |
92 | #define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi)) |
93 | #define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \ |
94 | (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg) |
95 | #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \ |
96 | (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1)) |
97 | |
98 | #define GET_SEGNO(sbi, blk_addr) \ |
99 | ((!__is_valid_data_blkaddr(blk_addr)) ? \ |
100 | NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \ |
101 | GET_SEGNO_FROM_SEG0(sbi, blk_addr))) |
102 | #define BLKS_PER_SEC(sbi) \ |
103 | ((sbi)->segs_per_sec * (sbi)->blocks_per_seg) |
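/*
 * The CAP_* variants below subtract blocks that can never be written in a
 * section (sbi->unusable_blocks_per_sec), e.g. when a zoned device reports
 * a zone capacity smaller than its zone size.
 */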
104 | #define CAP_BLKS_PER_SEC(sbi) \ |
105 | ((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \ |
106 | (sbi)->unusable_blocks_per_sec) |
107 | #define CAP_SEGS_PER_SEC(sbi) \ |
108 | ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\ |
109 | (sbi)->log_blocks_per_seg)) |
110 | #define GET_SEC_FROM_SEG(sbi, segno) \ |
111 | (((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec) |
112 | #define GET_SEG_FROM_SEC(sbi, secno) \ |
113 | ((secno) * (sbi)->segs_per_sec) |
114 | #define GET_ZONE_FROM_SEC(sbi, secno) \ |
115 | (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone) |
116 | #define GET_ZONE_FROM_SEG(sbi, segno) \ |
117 | GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno)) |
118 | |
119 | #define GET_SUM_BLOCK(sbi, segno) \ |
120 | ((sbi)->sm_info->ssa_blkaddr + (segno)) |
121 | |
122 | #define GET_SUM_TYPE(footer) ((footer)->entry_type) |
123 | #define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type)) |
124 | |
125 | #define SIT_ENTRY_OFFSET(sit_i, segno) \ |
126 | ((segno) % (sit_i)->sents_per_block) |
127 | #define SIT_BLOCK_OFFSET(segno) \ |
128 | ((segno) / SIT_ENTRY_PER_BLOCK) |
129 | #define START_SEGNO(segno) \ |
130 | (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK) |
131 | #define SIT_BLK_CNT(sbi) \ |
132 | DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK) |
133 | #define f2fs_bitmap_size(nr) \ |
134 | (BITS_TO_LONGS(nr) * sizeof(unsigned long)) |
135 | |
136 | #define SECTOR_FROM_BLOCK(blk_addr) \ |
137 | (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK) |
138 | #define SECTOR_TO_BLOCK(sectors) \ |
139 | ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK) |
140 | |
141 | /* |
142 | * indicate a block allocation direction: RIGHT and LEFT. |
143 | * RIGHT means allocating new sections towards the end of volume. |
144 | * LEFT means the opposite direction. |
145 | */ |
146 | enum { |
147 | ALLOC_RIGHT = 0, |
148 | ALLOC_LEFT |
149 | }; |
150 | |
151 | /* |
152 | * In the victim_sel_policy->alloc_mode, there are three block allocation modes. |
153 | * LFS writes data sequentially with cleaning operations. |
154 | * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations. |
 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into a
 * fragmented segment which has a similar aging degree.
157 | */ |
158 | enum { |
159 | LFS = 0, |
160 | SSR, |
161 | AT_SSR, |
162 | }; |
163 | |
164 | /* |
165 | * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes. |
166 | * GC_CB is based on cost-benefit algorithm. |
167 | * GC_GREEDY is based on greedy algorithm. |
168 | * GC_AT is based on age-threshold algorithm. |
169 | */ |
170 | enum { |
171 | GC_CB = 0, |
172 | GC_GREEDY, |
173 | GC_AT, |
174 | ALLOC_NEXT, |
175 | FLUSH_DEVICE, |
176 | MAX_GC_POLICY, |
177 | }; |
178 | |
179 | /* |
180 | * BG_GC means the background cleaning job. |
181 | * FG_GC means the on-demand cleaning job. |
182 | */ |
183 | enum { |
184 | BG_GC = 0, |
185 | FG_GC, |
186 | }; |
187 | |
188 | /* for a function parameter to select a victim segment */ |
189 | struct victim_sel_policy { |
	int alloc_mode;			/* LFS, SSR or AT_SSR */
	int gc_mode;			/* GC_CB, GC_GREEDY or GC_AT */
192 | unsigned long *dirty_bitmap; /* dirty segment/section bitmap */ |
193 | unsigned int max_search; /* |
194 | * maximum # of segments/sections |
195 | * to search |
196 | */ |
197 | unsigned int offset; /* last scanned bitmap offset */ |
198 | unsigned int ofs_unit; /* bitmap search unit */ |
199 | unsigned int min_cost; /* minimum cost */ |
200 | unsigned long long oldest_age; /* oldest age of segments having the same min cost */ |
201 | unsigned int min_segno; /* segment # having min. cost */ |
	unsigned long long age;		/* mtime of GCed section */
203 | unsigned long long age_threshold;/* age threshold */ |
204 | }; |
205 | |
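/*
 * Per-segment SIT cache entry.  The first four bitfields pack into a single
 * 32-bit word; the 10-bit valid block counters are wide enough for the
 * default 512 blocks per segment.
 */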
206 | struct seg_entry { |
207 | unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */ |
208 | unsigned int valid_blocks:10; /* # of valid blocks */ |
209 | unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */ |
210 | unsigned int padding:6; /* padding */ |
211 | unsigned char *cur_valid_map; /* validity bitmap of blocks */ |
212 | #ifdef CONFIG_F2FS_CHECK_FS |
213 | unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */ |
214 | #endif |
215 | /* |
216 | * # of valid blocks and the validity bitmap stored in the last |
217 | * checkpoint pack. This information is used by the SSR mode. |
218 | */ |
219 | unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */ |
220 | unsigned char *discard_map; |
221 | unsigned long long mtime; /* modification time of the segment */ |
222 | }; |
223 | |
224 | struct sec_entry { |
225 | unsigned int valid_blocks; /* # of valid blocks in a section */ |
226 | }; |
227 | |
228 | #define MAX_SKIP_GC_COUNT 16 |
229 | |
230 | struct revoke_entry { |
231 | struct list_head list; |
232 | block_t old_addr; /* for revoking when fail to commit */ |
233 | pgoff_t index; |
234 | }; |
235 | |
236 | struct sit_info { |
237 | block_t sit_base_addr; /* start block address of SIT area */ |
238 | block_t sit_blocks; /* # of blocks used by SIT area */ |
239 | block_t written_valid_blocks; /* # of valid blocks in main area */ |
240 | char *bitmap; /* all bitmaps pointer */ |
241 | char *sit_bitmap; /* SIT bitmap pointer */ |
242 | #ifdef CONFIG_F2FS_CHECK_FS |
243 | char *sit_bitmap_mir; /* SIT bitmap mirror */ |
244 | |
245 | /* bitmap of segments to be ignored by GC in case of errors */ |
246 | unsigned long *invalid_segmap; |
247 | #endif |
248 | unsigned int bitmap_size; /* SIT bitmap size */ |
249 | |
	unsigned long *tmp_map;			/* bitmap for temporary use */
251 | unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ |
252 | unsigned int dirty_sentries; /* # of dirty sentries */ |
253 | unsigned int sents_per_block; /* # of SIT entries per block */ |
254 | struct rw_semaphore sentry_lock; /* to protect SIT cache */ |
255 | struct seg_entry *sentries; /* SIT segment-level cache */ |
256 | struct sec_entry *sec_entries; /* SIT section-level cache */ |
257 | |
258 | /* for cost-benefit algorithm in cleaning procedure */ |
259 | unsigned long long elapsed_time; /* elapsed time after mount */ |
260 | unsigned long long mounted_time; /* mount time */ |
261 | unsigned long long min_mtime; /* min. modification time */ |
262 | unsigned long long max_mtime; /* max. modification time */ |
263 | unsigned long long dirty_min_mtime; /* rerange candidates in GC_AT */ |
264 | unsigned long long dirty_max_mtime; /* rerange candidates in GC_AT */ |
265 | |
266 | unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */ |
267 | }; |
268 | |
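/*
 * Free segment/section bookkeeping.  Note that a set bit in free_segmap or
 * free_secmap means the segment or section is in use; free_segments and
 * free_sections count the clear bits.
 */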
269 | struct free_segmap_info { |
270 | unsigned int start_segno; /* start segment number logically */ |
271 | unsigned int free_segments; /* # of free segments */ |
272 | unsigned int free_sections; /* # of free sections */ |
273 | spinlock_t segmap_lock; /* free segmap lock */ |
274 | unsigned long *free_segmap; /* free segment bitmap */ |
275 | unsigned long *free_secmap; /* free section bitmap */ |
276 | }; |
277 | |
/* Note: the order of dirty types is the same as CURSEG_XXX in f2fs.h */
279 | enum dirty_type { |
280 | DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */ |
281 | DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */ |
282 | DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */ |
283 | DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */ |
284 | DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */ |
285 | DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */ |
286 | DIRTY, /* to count # of dirty segments */ |
287 | PRE, /* to count # of entirely obsolete segments */ |
288 | NR_DIRTY_TYPE |
289 | }; |
290 | |
291 | struct dirty_seglist_info { |
292 | unsigned long *dirty_segmap[NR_DIRTY_TYPE]; |
293 | unsigned long *dirty_secmap; |
294 | struct mutex seglist_lock; /* lock for segment bitmaps */ |
295 | int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */ |
296 | unsigned long *victim_secmap; /* background GC victims */ |
297 | unsigned long *pinned_secmap; /* pinned victims from foreground GC */ |
	unsigned int pinned_secmap_cnt;		/* count of victims which have pinned data */
299 | bool enable_pin_section; /* enable pinning section */ |
300 | }; |
301 | |
302 | /* for active log information */ |
303 | struct curseg_info { |
304 | struct mutex curseg_mutex; /* lock for consistency */ |
305 | struct f2fs_summary_block *sum_blk; /* cached summary block */ |
306 | struct rw_semaphore journal_rwsem; /* protect journal area */ |
307 | struct f2fs_journal *journal; /* cached journal info */ |
308 | unsigned char alloc_type; /* current allocation type */ |
309 | unsigned short seg_type; /* segment type like CURSEG_XXX_TYPE */ |
310 | unsigned int segno; /* current segment number */ |
311 | unsigned short next_blkoff; /* next block offset to write */ |
312 | unsigned int zone; /* current zone number */ |
313 | unsigned int next_segno; /* preallocated segment */ |
	int fragment_remained_chunk;		/* remaining blocks in a chunk for block fragmentation mode */
315 | bool inited; /* indicate inmem log is inited */ |
316 | }; |
317 | |
318 | struct sit_entry_set { |
319 | struct list_head set_list; /* link with all sit sets */ |
320 | unsigned int start_segno; /* start segno of sits in set */ |
321 | unsigned int entry_cnt; /* the # of sit entries in set */ |
322 | }; |
323 | |
324 | /* |
325 | * inline functions |
326 | */ |
327 | static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type) |
328 | { |
329 | return (struct curseg_info *)(SM_I(sbi)->curseg_array + type); |
330 | } |
331 | |
332 | static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi, |
333 | unsigned int segno) |
334 | { |
335 | struct sit_info *sit_i = SIT_I(sbi); |
336 | return &sit_i->sentries[segno]; |
337 | } |
338 | |
339 | static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi, |
340 | unsigned int segno) |
341 | { |
342 | struct sit_info *sit_i = SIT_I(sbi); |
343 | return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)]; |
344 | } |
345 | |
346 | static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi, |
347 | unsigned int segno, bool use_section) |
348 | { |
349 | /* |
350 | * In order to get # of valid blocks in a section instantly from many |
351 | * segments, f2fs manages two counting structures separately. |
352 | */ |
353 | if (use_section && __is_large_section(sbi)) |
354 | return get_sec_entry(sbi, segno)->valid_blocks; |
355 | else |
356 | return get_seg_entry(sbi, segno)->valid_blocks; |
357 | } |
358 | |
359 | static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, |
360 | unsigned int segno, bool use_section) |
361 | { |
362 | if (use_section && __is_large_section(sbi)) { |
363 | unsigned int start_segno = START_SEGNO(segno); |
364 | unsigned int blocks = 0; |
365 | int i; |
366 | |
367 | for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) { |
			struct seg_entry *se = get_seg_entry(sbi, start_segno);
369 | |
370 | blocks += se->ckpt_valid_blocks; |
371 | } |
372 | return blocks; |
373 | } |
374 | return get_seg_entry(sbi, segno)->ckpt_valid_blocks; |
375 | } |
376 | |
377 | static inline void seg_info_from_raw_sit(struct seg_entry *se, |
378 | struct f2fs_sit_entry *rs) |
379 | { |
380 | se->valid_blocks = GET_SIT_VBLOCKS(rs); |
381 | se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs); |
382 | memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); |
383 | memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); |
384 | #ifdef CONFIG_F2FS_CHECK_FS |
385 | memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE); |
386 | #endif |
387 | se->type = GET_SIT_TYPE(rs); |
388 | se->mtime = le64_to_cpu(rs->mtime); |
389 | } |
390 | |
391 | static inline void __seg_info_to_raw_sit(struct seg_entry *se, |
392 | struct f2fs_sit_entry *rs) |
393 | { |
394 | unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) | |
395 | se->valid_blocks; |
396 | rs->vblocks = cpu_to_le16(raw_vblocks); |
397 | memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); |
398 | rs->mtime = cpu_to_le64(se->mtime); |
399 | } |
400 | |
401 | static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi, |
402 | struct page *page, unsigned int start) |
403 | { |
404 | struct f2fs_sit_block *raw_sit; |
405 | struct seg_entry *se; |
406 | struct f2fs_sit_entry *rs; |
407 | unsigned int end = min(start + SIT_ENTRY_PER_BLOCK, |
408 | (unsigned long)MAIN_SEGS(sbi)); |
409 | int i; |
410 | |
411 | raw_sit = (struct f2fs_sit_block *)page_address(page); |
412 | memset(raw_sit, 0, PAGE_SIZE); |
413 | for (i = 0; i < end - start; i++) { |
414 | rs = &raw_sit->entries[i]; |
		se = get_seg_entry(sbi, start + i);
416 | __seg_info_to_raw_sit(se, rs); |
417 | } |
418 | } |
419 | |
420 | static inline void seg_info_to_raw_sit(struct seg_entry *se, |
421 | struct f2fs_sit_entry *rs) |
422 | { |
423 | __seg_info_to_raw_sit(se, rs); |
424 | |
425 | memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); |
426 | se->ckpt_valid_blocks = se->valid_blocks; |
427 | } |
428 | |
429 | static inline unsigned int find_next_inuse(struct free_segmap_info *free_i, |
430 | unsigned int max, unsigned int segno) |
431 | { |
432 | unsigned int ret; |
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
436 | return ret; |
437 | } |
438 | |
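/*
 * Mark @segno free: clear its bit in free_segmap and, if no in-use segment
 * remains within the usable part of its section, mark the whole section
 * free as well.
 */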
439 | static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) |
440 | { |
441 | struct free_segmap_info *free_i = FREE_I(sbi); |
442 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); |
443 | unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); |
444 | unsigned int next; |
445 | unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno); |
446 | |
	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + usable_segs) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
458 | } |
459 | |
460 | static inline void __set_inuse(struct f2fs_sb_info *sbi, |
461 | unsigned int segno) |
462 | { |
463 | struct free_segmap_info *free_i = FREE_I(sbi); |
464 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); |
465 | |
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
469 | free_i->free_sections--; |
470 | } |
471 | |
472 | static inline void __set_test_and_free(struct f2fs_sb_info *sbi, |
473 | unsigned int segno, bool inmem) |
474 | { |
475 | struct free_segmap_info *free_i = FREE_I(sbi); |
476 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); |
477 | unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); |
478 | unsigned int next; |
479 | unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno); |
480 | |
	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (!inmem && IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + usable_segs) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
496 | } |
497 | |
498 | static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi, |
499 | unsigned int segno) |
500 | { |
501 | struct free_segmap_info *free_i = FREE_I(sbi); |
502 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); |
503 | |
	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
511 | } |
512 | |
513 | static inline void get_sit_bitmap(struct f2fs_sb_info *sbi, |
514 | void *dst_addr) |
515 | { |
516 | struct sit_info *sit_i = SIT_I(sbi); |
517 | |
518 | #ifdef CONFIG_F2FS_CHECK_FS |
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
					sit_i->bitmap_size))
521 | f2fs_bug_on(sbi, 1); |
522 | #endif |
523 | memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size); |
524 | } |
525 | |
526 | static inline block_t written_block_count(struct f2fs_sb_info *sbi) |
527 | { |
528 | return SIT_I(sbi)->written_valid_blocks; |
529 | } |
530 | |
531 | static inline unsigned int free_segments(struct f2fs_sb_info *sbi) |
532 | { |
533 | return FREE_I(sbi)->free_segments; |
534 | } |
535 | |
536 | static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi) |
537 | { |
538 | return SM_I(sbi)->reserved_segments + |
539 | SM_I(sbi)->additional_reserved_segments; |
540 | } |
541 | |
542 | static inline unsigned int free_sections(struct f2fs_sb_info *sbi) |
543 | { |
544 | return FREE_I(sbi)->free_sections; |
545 | } |
546 | |
547 | static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi) |
548 | { |
549 | return DIRTY_I(sbi)->nr_dirty[PRE]; |
550 | } |
551 | |
552 | static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi) |
553 | { |
554 | return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] + |
555 | DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] + |
556 | DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] + |
557 | DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] + |
558 | DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] + |
559 | DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE]; |
560 | } |
561 | |
562 | static inline int overprovision_segments(struct f2fs_sb_info *sbi) |
563 | { |
564 | return SM_I(sbi)->ovp_segments; |
565 | } |
566 | |
567 | static inline int reserved_sections(struct f2fs_sb_info *sbi) |
568 | { |
569 | return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi)); |
570 | } |
571 | |
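/*
 * Check whether the currently open node and data logs still have enough
 * room (beyond the blocks already valid at the last checkpoint) to absorb
 * @node_blocks and @dent_blocks without opening a new segment.
 */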
572 | static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, |
573 | unsigned int node_blocks, unsigned int dent_blocks) |
574 | { |
575 | |
576 | unsigned int segno, left_blocks; |
577 | int i; |
578 | |
579 | /* check current node segment */ |
580 | for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) { |
		segno = CURSEG_I(sbi, i)->segno;
582 | left_blocks = f2fs_usable_blks_in_seg(sbi, segno) - |
583 | get_seg_entry(sbi, segno)->ckpt_valid_blocks; |
584 | |
585 | if (node_blocks > left_blocks) |
586 | return false; |
587 | } |
588 | |
589 | /* check current data segment */ |
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
591 | left_blocks = f2fs_usable_blks_in_seg(sbi, segno) - |
592 | get_seg_entry(sbi, segno)->ckpt_valid_blocks; |
593 | if (dent_blocks > left_blocks) |
594 | return false; |
595 | return true; |
596 | } |
597 | |
598 | /* |
599 | * calculate needed sections for dirty node/dentry |
600 | * and call has_curseg_enough_space |
601 | */ |
602 | static inline void __get_secs_required(struct f2fs_sb_info *sbi, |
603 | unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p) |
604 | { |
	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS) +
					get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
609 | unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi); |
610 | unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi); |
611 | unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi); |
612 | unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi); |
613 | |
614 | if (lower_p) |
615 | *lower_p = node_secs + dent_secs; |
616 | if (upper_p) |
617 | *upper_p = node_secs + dent_secs + |
618 | (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0); |
619 | if (curseg_p) |
620 | *curseg_p = has_curseg_enough_space(sbi, |
621 | node_blocks, dent_blocks); |
622 | } |
623 | |
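/*
 * Free-space check used before dirtying more data: when free sections exceed
 * the upper bound there is clearly enough space, when they fall to the lower
 * bound or below there is not, and in between the answer depends on whether
 * the current segments can still absorb the dirty node/dentry blocks.
 */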
624 | static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, |
625 | int freed, int needed) |
626 | { |
627 | unsigned int free_secs, lower_secs, upper_secs; |
628 | bool curseg_space; |
629 | |
630 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
631 | return false; |
632 | |
	__get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
634 | |
635 | free_secs = free_sections(sbi) + freed; |
636 | lower_secs += needed + reserved_sections(sbi); |
637 | upper_secs += needed + reserved_sections(sbi); |
638 | |
639 | if (free_secs > upper_secs) |
640 | return false; |
641 | else if (free_secs <= lower_secs) |
642 | return true; |
643 | return !curseg_space; |
644 | } |
645 | |
646 | static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi, |
647 | int freed, int needed) |
648 | { |
649 | return !has_not_enough_free_secs(sbi, freed, needed); |
650 | } |
651 | |
652 | static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi) |
653 | { |
654 | if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) |
655 | return true; |
656 | if (likely(has_enough_free_secs(sbi, 0, 0))) |
657 | return true; |
658 | return false; |
659 | } |
660 | |
661 | static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi) |
662 | { |
663 | return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments; |
664 | } |
665 | |
666 | static inline int utilization(struct f2fs_sb_info *sbi) |
667 | { |
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
670 | } |
671 | |
672 | /* |
 * Sometimes it is better for f2fs to drop the out-of-place update policy,
 * and users can control the policy through sysfs entries.
 * The policies and their triggering conditions are as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
681 | * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash |
682 | * storages. IPU will be triggered only if the # of dirty |
683 | * pages over min_fsync_blocks. (=default option) |
684 | * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests. |
685 | * F2FS_IPU_NOCACHE - disable IPU bio cache. |
686 | * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has |
687 | * FI_OPU_WRITE flag. |
688 | * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode) |
689 | */ |
690 | #define DEF_MIN_IPU_UTIL 70 |
691 | #define DEF_MIN_FSYNC_BLOCKS 8 |
692 | #define DEF_MIN_HOT_BLOCKS 16 |
693 | |
694 | #define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */ |
695 | |
696 | #define F2FS_IPU_DISABLE 0 |
697 | |
698 | /* Modification on enum should be synchronized with ipu_mode_names array */ |
699 | enum { |
700 | F2FS_IPU_FORCE, |
701 | F2FS_IPU_SSR, |
702 | F2FS_IPU_UTIL, |
703 | F2FS_IPU_SSR_UTIL, |
704 | F2FS_IPU_FSYNC, |
705 | F2FS_IPU_ASYNC, |
706 | F2FS_IPU_NOCACHE, |
707 | F2FS_IPU_HONOR_OPU_WRITE, |
708 | F2FS_IPU_MAX, |
709 | }; |
710 | |
711 | static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi) |
712 | { |
713 | return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE; |
714 | } |
715 | |
716 | #define F2FS_IPU_POLICY(name) \ |
717 | static inline bool IS_##name(struct f2fs_sb_info *sbi) \ |
718 | { \ |
719 | return SM_I(sbi)->ipu_policy & BIT(name); \ |
720 | } |
721 | |
722 | F2FS_IPU_POLICY(F2FS_IPU_FORCE); |
723 | F2FS_IPU_POLICY(F2FS_IPU_SSR); |
724 | F2FS_IPU_POLICY(F2FS_IPU_UTIL); |
725 | F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL); |
726 | F2FS_IPU_POLICY(F2FS_IPU_FSYNC); |
727 | F2FS_IPU_POLICY(F2FS_IPU_ASYNC); |
728 | F2FS_IPU_POLICY(F2FS_IPU_NOCACHE); |
729 | F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE); |
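/*
 * Illustrative sketch (not the exact call sites) of how the generated
 * predicates are meant to be combined when deciding on in-place update,
 * e.g.:
 *
 *	if (IS_F2FS_IPU_UTIL(sbi) &&
 *	    utilization(sbi) > SM_I(sbi)->min_ipu_util)
 *		use in-place update;
 */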
730 | |
731 | static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi, |
732 | int type) |
733 | { |
734 | struct curseg_info *curseg = CURSEG_I(sbi, type); |
735 | return curseg->segno; |
736 | } |
737 | |
738 | static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi, |
739 | int type) |
740 | { |
741 | struct curseg_info *curseg = CURSEG_I(sbi, type); |
742 | return curseg->alloc_type; |
743 | } |
744 | |
745 | static inline bool valid_main_segno(struct f2fs_sb_info *sbi, |
746 | unsigned int segno) |
747 | { |
748 | return segno <= (MAIN_SEGS(sbi) - 1); |
749 | } |
750 | |
751 | static inline void verify_fio_blkaddr(struct f2fs_io_info *fio) |
752 | { |
753 | struct f2fs_sb_info *sbi = fio->sbi; |
754 | |
	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
760 | } |
761 | |
762 | /* |
763 | * Summary block is always treated as an invalid block |
764 | */ |
765 | static inline int check_block_count(struct f2fs_sb_info *sbi, |
766 | int segno, struct f2fs_sit_entry *raw_sit) |
767 | { |
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
769 | int valid_blocks = 0; |
770 | int cur_pos = 0, next_pos; |
771 | unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno); |
772 | |
773 | /* check bitmap with valid block count */ |
774 | do { |
775 | if (is_valid) { |
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
784 | cur_pos = next_pos; |
785 | is_valid = !is_valid; |
786 | } while (cur_pos < usable_blks_per_seg); |
787 | |
788 | if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { |
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
793 | return -EFSCORRUPTED; |
794 | } |
795 | |
796 | if (usable_blks_per_seg < sbi->blocks_per_seg) |
797 | f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map, |
798 | sbi->blocks_per_seg, |
799 | usable_blks_per_seg) != sbi->blocks_per_seg); |
800 | |
801 | /* check segment usage, and check boundary of a given segment number */ |
802 | if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg |
803 | || !valid_main_segno(sbi, segno))) { |
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
808 | return -EFSCORRUPTED; |
809 | } |
810 | return 0; |
811 | } |
812 | |
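/*
 * The SIT area holds two copies of every SIT block and sit_bitmap records
 * which copy is currently valid: current_sit_addr() returns the valid copy,
 * while next_sit_addr() returns the other one so that SIT writeback can
 * ping-pong between the two halves.
 */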
813 | static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi, |
814 | unsigned int start) |
815 | { |
816 | struct sit_info *sit_i = SIT_I(sbi); |
817 | unsigned int offset = SIT_BLOCK_OFFSET(start); |
818 | block_t blk_addr = sit_i->sit_base_addr + offset; |
819 | |
820 | f2fs_bug_on(sbi, !valid_main_segno(sbi, start)); |
821 | |
822 | #ifdef CONFIG_F2FS_CHECK_FS |
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
825 | f2fs_bug_on(sbi, 1); |
826 | #endif |
827 | |
828 | /* calculate sit block address */ |
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
830 | blk_addr += sit_i->sit_blocks; |
831 | |
832 | return blk_addr; |
833 | } |
834 | |
835 | static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi, |
836 | pgoff_t block_addr) |
837 | { |
838 | struct sit_info *sit_i = SIT_I(sbi); |
839 | block_addr -= sit_i->sit_base_addr; |
840 | if (block_addr < sit_i->sit_blocks) |
841 | block_addr += sit_i->sit_blocks; |
842 | else |
843 | block_addr -= sit_i->sit_blocks; |
844 | |
845 | return block_addr + sit_i->sit_base_addr; |
846 | } |
847 | |
848 | static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start) |
849 | { |
850 | unsigned int block_off = SIT_BLOCK_OFFSET(start); |
851 | |
	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
855 | #endif |
856 | } |
857 | |
858 | static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi, |
859 | bool base_time) |
860 | { |
861 | struct sit_info *sit_i = SIT_I(sbi); |
862 | time64_t diff, now = ktime_get_boottime_seconds(); |
863 | |
864 | if (now >= sit_i->mounted_time) |
865 | return sit_i->elapsed_time + now - sit_i->mounted_time; |
866 | |
867 | /* system time is set to the past */ |
868 | if (!base_time) { |
869 | diff = sit_i->mounted_time - now; |
870 | if (sit_i->elapsed_time >= diff) |
871 | return sit_i->elapsed_time - diff; |
872 | return 0; |
873 | } |
874 | return sit_i->elapsed_time; |
875 | } |
876 | |
877 | static inline void set_summary(struct f2fs_summary *sum, nid_t nid, |
878 | unsigned int ofs_in_node, unsigned char version) |
879 | { |
880 | sum->nid = cpu_to_le32(nid); |
881 | sum->ofs_in_node = cpu_to_le16(ofs_in_node); |
882 | sum->version = version; |
883 | } |
884 | |
885 | static inline block_t start_sum_block(struct f2fs_sb_info *sbi) |
886 | { |
887 | return __start_cp_addr(sbi) + |
888 | le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); |
889 | } |
890 | |
891 | static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) |
892 | { |
893 | return __start_cp_addr(sbi) + |
894 | le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count) |
895 | - (base + 1) + type; |
896 | } |
897 | |
898 | static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) |
899 | { |
900 | if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) |
901 | return true; |
902 | return false; |
903 | } |
904 | |
905 | /* |
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
908 | * By default, 512 pages for directory data, |
909 | * 512 pages (2MB) * 8 for nodes, and |
910 | * 256 pages * 8 for meta are set. |
911 | */ |
912 | static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type) |
913 | { |
914 | if (sbi->sb->s_bdi->wb.dirty_exceeded) |
915 | return 0; |
916 | |
917 | if (type == DATA) |
918 | return sbi->blocks_per_seg; |
919 | else if (type == NODE) |
920 | return 8 * sbi->blocks_per_seg; |
921 | else if (type == META) |
922 | return 8 * BIO_MAX_VECS; |
923 | else |
924 | return 0; |
925 | } |
926 | |
927 | /* |
 * When writing pages, it is better to align nr_to_write to the segment size.
929 | */ |
930 | static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type, |
931 | struct writeback_control *wbc) |
932 | { |
933 | long nr_to_write, desired; |
934 | |
935 | if (wbc->sync_mode != WB_SYNC_NONE) |
936 | return 0; |
937 | |
938 | nr_to_write = wbc->nr_to_write; |
939 | desired = BIO_MAX_VECS; |
940 | if (type == NODE) |
941 | desired <<= 1; |
942 | |
943 | wbc->nr_to_write = desired; |
944 | return desired - nr_to_write; |
945 | } |
946 | |
947 | static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force) |
948 | { |
949 | struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; |
950 | bool wakeup = false; |
951 | int i; |
952 | |
953 | if (force) |
954 | goto wake_up; |
955 | |
956 | mutex_lock(&dcc->cmd_lock); |
957 | for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { |
958 | if (i + 1 < dcc->discard_granularity) |
959 | break; |
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
967 | return; |
968 | wake_up: |
969 | dcc->discard_wake = true; |
970 | wake_up_interruptible_all(&dcc->discard_wait_queue); |
971 | } |
972 | |