1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * NILFS segment constructor. |
4 | * |
5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
6 | * |
7 | * Written by Ryusuke Konishi. |
8 | * |
9 | */ |
10 | |
11 | #include <linux/pagemap.h> |
12 | #include <linux/buffer_head.h> |
13 | #include <linux/writeback.h> |
14 | #include <linux/bitops.h> |
15 | #include <linux/bio.h> |
16 | #include <linux/completion.h> |
17 | #include <linux/blkdev.h> |
18 | #include <linux/backing-dev.h> |
19 | #include <linux/freezer.h> |
20 | #include <linux/kthread.h> |
21 | #include <linux/crc32.h> |
22 | #include <linux/pagevec.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/sched/signal.h> |
25 | |
26 | #include "nilfs.h" |
27 | #include "btnode.h" |
28 | #include "page.h" |
29 | #include "segment.h" |
30 | #include "sufile.h" |
31 | #include "cpfile.h" |
32 | #include "ifile.h" |
33 | #include "segbuf.h" |
34 | |
35 | |
36 | /* |
37 | * Segment constructor |
38 | */ |
39 | #define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */ |
40 | |
41 | #define SC_MAX_SEGDELTA 64 /* |
42 | * Upper limit of the number of segments |
43 | * appended in collection retry loop |
44 | */ |
45 | |
46 | /* Construction mode */ |
47 | enum { |
48 | SC_LSEG_SR = 1, /* Make a logical segment having a super root */ |
49 | SC_LSEG_DSYNC, /* |
50 | * Flush data blocks of a given file and make |
51 | * a logical segment without a super root. |
52 | */ |
53 | SC_FLUSH_FILE, /* |
54 | * Flush data files, leads to segment writes without |
55 | * creating a checkpoint. |
56 | */ |
57 | SC_FLUSH_DAT, /* |
58 | * Flush DAT file. This also creates segments |
59 | * without a checkpoint. |
60 | */ |
61 | }; |
62 | |
63 | /* Stage numbers of dirty block collection */ |
64 | enum { |
65 | NILFS_ST_INIT = 0, |
66 | NILFS_ST_GC, /* Collecting dirty blocks for GC */ |
67 | NILFS_ST_FILE, |
68 | NILFS_ST_IFILE, |
69 | NILFS_ST_CPFILE, |
70 | NILFS_ST_SUFILE, |
71 | NILFS_ST_DAT, |
72 | NILFS_ST_SR, /* Super root */ |
73 | NILFS_ST_DSYNC, /* Data sync blocks */ |
74 | NILFS_ST_DONE, |
75 | }; |
76 | |
77 | #define CREATE_TRACE_POINTS |
78 | #include <trace/events/nilfs2.h> |
79 | |
80 | /* |
81 | * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are |
82 | * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of |
83 | * the variable must use them because transition of stage count must involve |
84 | * trace events (trace_nilfs2_collection_stage_transition). |
85 | * |
86 | * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't |
87 | * produce tracepoint events. It is provided just for making the intention |
88 | * clear. |
89 | */ |
90 | static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci) |
91 | { |
92 | sci->sc_stage.scnt++; |
93 | trace_nilfs2_collection_stage_transition(sci); |
94 | } |
95 | |
96 | static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt) |
97 | { |
98 | sci->sc_stage.scnt = next_scnt; |
99 | trace_nilfs2_collection_stage_transition(sci); |
100 | } |
101 | |
102 | static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci) |
103 | { |
104 | return sci->sc_stage.scnt; |
105 | } |
106 | |
107 | /* State flags of collection */ |
108 | #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */ |
109 | #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */ |
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
111 | #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED) |
112 | |
113 | /* Operations depending on the construction mode and file type */ |
114 | struct nilfs_sc_operations { |
115 | int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *, |
116 | struct inode *); |
117 | int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *, |
118 | struct inode *); |
119 | int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *, |
120 | struct inode *); |
121 | void (*write_data_binfo)(struct nilfs_sc_info *, |
122 | struct nilfs_segsum_pointer *, |
123 | union nilfs_binfo *); |
124 | void (*write_node_binfo)(struct nilfs_sc_info *, |
125 | struct nilfs_segsum_pointer *, |
126 | union nilfs_binfo *); |
127 | }; |
128 | |
129 | /* |
130 | * Other definitions |
131 | */ |
132 | static void nilfs_segctor_start_timer(struct nilfs_sc_info *); |
133 | static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int); |
134 | static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *); |
135 | static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int); |
136 | |
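/*
 * nilfs_cnt32_ge() - wrap-safe "a >= b" comparison of 32-bit sequence
 * counters (analogous to time_after_eq()); it stays correct even after the
 * counters wrap around.
 */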
137 | #define nilfs_cnt32_ge(a, b) \ |
138 | (typecheck(__u32, a) && typecheck(__u32, b) && \ |
139 | ((__s32)(a) - (__s32)(b) >= 0)) |
140 | |
141 | static int nilfs_prepare_segment_lock(struct super_block *sb, |
142 | struct nilfs_transaction_info *ti) |
143 | { |
144 | struct nilfs_transaction_info *cur_ti = current->journal_info; |
145 | void *save = NULL; |
146 | |
147 | if (cur_ti) { |
148 | if (cur_ti->ti_magic == NILFS_TI_MAGIC) |
149 | return ++cur_ti->ti_count; |
150 | |
151 | /* |
152 | * If journal_info field is occupied by other FS, |
153 | * it is saved and will be restored on |
154 | * nilfs_transaction_commit(). |
155 | */ |
		nilfs_warn(sb, "journal info from a different FS");
157 | save = current->journal_info; |
158 | } |
159 | if (!ti) { |
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
161 | if (!ti) |
162 | return -ENOMEM; |
163 | ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC; |
164 | } else { |
165 | ti->ti_flags = 0; |
166 | } |
167 | ti->ti_count = 0; |
168 | ti->ti_save = save; |
169 | ti->ti_magic = NILFS_TI_MAGIC; |
170 | current->journal_info = ti; |
171 | return 0; |
172 | } |
173 | |
174 | /** |
175 | * nilfs_transaction_begin - start indivisible file operations. |
176 | * @sb: super block |
177 | * @ti: nilfs_transaction_info |
178 | * @vacancy_check: flags for vacancy rate checks |
179 | * |
180 | * nilfs_transaction_begin() acquires a reader/writer semaphore, called |
181 | * the segment semaphore, to make a segment construction and write tasks |
182 | * exclusive. The function is used with nilfs_transaction_commit() in pairs. |
183 | * The region enclosed by these two functions can be nested. To avoid a |
184 | * deadlock, the semaphore is only acquired or released in the outermost call. |
185 | * |
186 | * This function allocates a nilfs_transaction_info struct to keep context |
187 | * information on it. It is initialized and hooked onto the current task in |
188 | * the outermost call. If a pre-allocated struct is given to @ti, it is used |
189 | * instead; otherwise a new struct is assigned from a slab. |
190 | * |
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if capacity is low.
193 | * |
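 * A minimal call pattern, shown as a sketch only (error handling and the
 * enclosed file operation are elided):
 *
 *	struct nilfs_transaction_info ti;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (!err) {
 *		... modify file data or metadata ...
 *		err = nilfs_transaction_commit(sb);
 *	}
 *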
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
196 | * |
197 | * %-ENOMEM - Insufficient memory available. |
198 | * |
199 | * %-ENOSPC - No space left on device |
200 | */ |
201 | int nilfs_transaction_begin(struct super_block *sb, |
202 | struct nilfs_transaction_info *ti, |
203 | int vacancy_check) |
204 | { |
205 | struct the_nilfs *nilfs; |
206 | int ret = nilfs_prepare_segment_lock(sb, ti); |
207 | struct nilfs_transaction_info *trace_ti; |
208 | |
209 | if (unlikely(ret < 0)) |
210 | return ret; |
211 | if (ret > 0) { |
212 | trace_ti = current->journal_info; |
213 | |
		trace_nilfs2_transaction_transition(sb, trace_ti,
				trace_ti->ti_count, trace_ti->ti_flags,
				TRACE_NILFS2_TRANSACTION_BEGIN);
217 | return 0; |
218 | } |
219 | |
220 | sb_start_intwrite(sb); |
221 | |
222 | nilfs = sb->s_fs_info; |
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
226 | ret = -ENOSPC; |
227 | goto failed; |
228 | } |
229 | |
230 | trace_ti = current->journal_info; |
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
234 | return 0; |
235 | |
236 | failed: |
237 | ti = current->journal_info; |
238 | current->journal_info = ti->ti_save; |
239 | if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) |
		kmem_cache_free(nilfs_transaction_cachep, ti);
241 | sb_end_intwrite(sb); |
242 | return ret; |
243 | } |
244 | |
245 | /** |
246 | * nilfs_transaction_commit - commit indivisible file operations. |
247 | * @sb: super block |
248 | * |
249 | * nilfs_transaction_commit() releases the read semaphore which is |
250 | * acquired by nilfs_transaction_begin(). This is only performed |
251 | * in outermost call of this function. If a commit flag is set, |
252 | * nilfs_transaction_commit() sets a timer to start the segment |
253 | * constructor. If a sync flag is set, it starts construction |
254 | * directly. |
255 | */ |
256 | int nilfs_transaction_commit(struct super_block *sb) |
257 | { |
258 | struct nilfs_transaction_info *ti = current->journal_info; |
259 | struct the_nilfs *nilfs = sb->s_fs_info; |
260 | int err = 0; |
261 | |
262 | BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); |
263 | ti->ti_flags |= NILFS_TI_COMMIT; |
264 | if (ti->ti_count > 0) { |
265 | ti->ti_count--; |
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
268 | return 0; |
269 | } |
270 | if (nilfs->ns_writer) { |
271 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
272 | |
273 | if (ti->ti_flags & NILFS_TI_COMMIT) |
274 | nilfs_segctor_start_timer(sci); |
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
276 | nilfs_segctor_do_flush(sci, 0); |
277 | } |
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
281 | |
282 | current->journal_info = ti->ti_save; |
283 | |
284 | if (ti->ti_flags & NILFS_TI_SYNC) |
285 | err = nilfs_construct_segment(sb); |
286 | if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) |
		kmem_cache_free(nilfs_transaction_cachep, ti);
288 | sb_end_intwrite(sb); |
289 | return err; |
290 | } |
291 | |
292 | void nilfs_transaction_abort(struct super_block *sb) |
293 | { |
294 | struct nilfs_transaction_info *ti = current->journal_info; |
295 | struct the_nilfs *nilfs = sb->s_fs_info; |
296 | |
297 | BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); |
298 | if (ti->ti_count > 0) { |
299 | ti->ti_count--; |
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
312 | sb_end_intwrite(sb); |
313 | } |
314 | |
315 | void nilfs_relax_pressure_in_lock(struct super_block *sb) |
316 | { |
317 | struct the_nilfs *nilfs = sb->s_fs_info; |
318 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
319 | |
320 | if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request) |
321 | return; |
322 | |
	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
327 | if (sci->sc_flush_request && |
328 | test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) { |
329 | struct nilfs_transaction_info *ti = current->journal_info; |
330 | |
331 | ti->ti_flags |= NILFS_TI_WRITER; |
332 | nilfs_segctor_do_immediate_flush(sci); |
333 | ti->ti_flags &= ~NILFS_TI_WRITER; |
334 | } |
	downgrade_write(&nilfs->ns_segctor_sem);
336 | } |
337 | |
338 | static void nilfs_transaction_lock(struct super_block *sb, |
339 | struct nilfs_transaction_info *ti, |
340 | int gcflag) |
341 | { |
342 | struct nilfs_transaction_info *cur_ti = current->journal_info; |
343 | struct the_nilfs *nilfs = sb->s_fs_info; |
344 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
345 | |
346 | WARN_ON(cur_ti); |
347 | ti->ti_flags = NILFS_TI_WRITER; |
348 | ti->ti_count = 0; |
349 | ti->ti_save = cur_ti; |
350 | ti->ti_magic = NILFS_TI_MAGIC; |
351 | current->journal_info = ti; |
352 | |
353 | for (;;) { |
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
358 | if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) |
359 | break; |
360 | |
361 | nilfs_segctor_do_immediate_flush(sci); |
362 | |
		up_write(&nilfs->ns_segctor_sem);
364 | cond_resched(); |
365 | } |
366 | if (gcflag) |
367 | ti->ti_flags |= NILFS_TI_GC; |
368 | |
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
371 | } |
372 | |
373 | static void nilfs_transaction_unlock(struct super_block *sb) |
374 | { |
375 | struct nilfs_transaction_info *ti = current->journal_info; |
376 | struct the_nilfs *nilfs = sb->s_fs_info; |
377 | |
378 | BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); |
379 | BUG_ON(ti->ti_count > 0); |
380 | |
	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
386 | } |
387 | |
388 | static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, |
389 | struct nilfs_segsum_pointer *ssp, |
390 | unsigned int bytes) |
391 | { |
392 | struct nilfs_segment_buffer *segbuf = sci->sc_curseg; |
393 | unsigned int blocksize = sci->sc_super->s_blocksize; |
394 | void *p; |
395 | |
396 | if (unlikely(ssp->offset + bytes > blocksize)) { |
397 | ssp->offset = 0; |
398 | BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh, |
399 | &segbuf->sb_segsum_buffers)); |
400 | ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh); |
401 | } |
402 | p = ssp->bh->b_data + ssp->offset; |
403 | ssp->offset += bytes; |
404 | return p; |
405 | } |
406 | |
407 | /** |
408 | * nilfs_segctor_reset_segment_buffer - reset the current segment buffer |
409 | * @sci: nilfs_sc_info |
410 | */ |
411 | static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) |
412 | { |
413 | struct nilfs_segment_buffer *segbuf = sci->sc_curseg; |
414 | struct buffer_head *sumbh; |
415 | unsigned int sumbytes; |
416 | unsigned int flags = 0; |
417 | int err; |
418 | |
419 | if (nilfs_doing_gc()) |
420 | flags = NILFS_SS_GC; |
421 | err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno); |
422 | if (unlikely(err)) |
423 | return err; |
424 | |
425 | sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); |
426 | sumbytes = segbuf->sb_sum.sumbytes; |
427 | sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes; |
428 | sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes; |
429 | sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; |
430 | return 0; |
431 | } |
432 | |
433 | /** |
434 | * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area |
435 | * @sci: segment constructor object |
436 | * |
437 | * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of |
438 | * the current segment summary block. |
439 | */ |
440 | static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci) |
441 | { |
442 | struct nilfs_segsum_pointer *ssp; |
443 | |
444 | ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr; |
445 | if (ssp->offset < ssp->bh->b_size) |
446 | memset(ssp->bh->b_data + ssp->offset, 0, |
447 | ssp->bh->b_size - ssp->offset); |
448 | } |
449 | |
450 | static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) |
451 | { |
452 | sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; |
453 | if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs)) |
454 | return -E2BIG; /* |
455 | * The current segment is filled up |
456 | * (internal code) |
457 | */ |
458 | nilfs_segctor_zeropad_segsum(sci); |
459 | sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); |
460 | return nilfs_segctor_reset_segment_buffer(sci); |
461 | } |
462 | |
463 | static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci) |
464 | { |
465 | struct nilfs_segment_buffer *segbuf = sci->sc_curseg; |
466 | int err; |
467 | |
468 | if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) { |
469 | err = nilfs_segctor_feed_segment(sci); |
470 | if (err) |
471 | return err; |
472 | segbuf = sci->sc_curseg; |
473 | } |
474 | err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root); |
475 | if (likely(!err)) |
476 | segbuf->sb_sum.flags |= NILFS_SS_SR; |
477 | return err; |
478 | } |
479 | |
480 | /* |
481 | * Functions for making segment summary and payloads |
482 | */ |
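/*
 * nilfs_segctor_segsum_block_required() returns true when appending a binfo
 * of @binfo_size (preceded by a finfo header if it is the first block of the
 * file) would overflow the current segment summary block, i.e. another
 * summary block is required.
 */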
483 | static int nilfs_segctor_segsum_block_required( |
484 | struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp, |
485 | unsigned int binfo_size) |
486 | { |
487 | unsigned int blocksize = sci->sc_super->s_blocksize; |
	/* Sizes of finfo and binfo are small enough compared to blocksize */
489 | |
490 | return ssp->offset + binfo_size + |
491 | (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) > |
492 | blocksize; |
493 | } |
494 | |
495 | static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, |
496 | struct inode *inode) |
497 | { |
498 | sci->sc_curseg->sb_sum.nfinfo++; |
499 | sci->sc_binfo_ptr = sci->sc_finfo_ptr; |
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
502 | |
503 | if (NILFS_I(inode)->i_root && |
504 | !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) |
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
506 | /* skip finfo */ |
507 | } |
508 | |
509 | static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci, |
510 | struct inode *inode) |
511 | { |
512 | struct nilfs_finfo *finfo; |
513 | struct nilfs_inode_info *ii; |
514 | struct nilfs_segment_buffer *segbuf; |
515 | __u64 cno; |
516 | |
517 | if (sci->sc_blk_cnt == 0) |
518 | return; |
519 | |
520 | ii = NILFS_I(inode); |
521 | |
522 | if (test_bit(NILFS_I_GCINODE, &ii->i_state)) |
523 | cno = ii->i_cno; |
524 | else if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) |
525 | cno = 0; |
526 | else |
527 | cno = sci->sc_cno; |
528 | |
	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
						 sizeof(*finfo));
531 | finfo->fi_ino = cpu_to_le64(inode->i_ino); |
532 | finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt); |
533 | finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt); |
534 | finfo->fi_cno = cpu_to_le64(cno); |
535 | |
536 | segbuf = sci->sc_curseg; |
537 | segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset + |
538 | sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1); |
539 | sci->sc_finfo_ptr = sci->sc_binfo_ptr; |
540 | sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; |
541 | } |
542 | |
543 | static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, |
544 | struct buffer_head *bh, |
545 | struct inode *inode, |
546 | unsigned int binfo_size) |
547 | { |
548 | struct nilfs_segment_buffer *segbuf; |
549 | int required, err = 0; |
550 | |
551 | retry: |
552 | segbuf = sci->sc_curseg; |
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
555 | if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) { |
556 | nilfs_segctor_end_finfo(sci, inode); |
557 | err = nilfs_segctor_feed_segment(sci); |
558 | if (err) |
559 | return err; |
560 | goto retry; |
561 | } |
562 | if (unlikely(required)) { |
563 | nilfs_segctor_zeropad_segsum(sci); |
564 | err = nilfs_segbuf_extend_segsum(segbuf); |
565 | if (unlikely(err)) |
566 | goto failed; |
567 | } |
568 | if (sci->sc_blk_cnt == 0) |
569 | nilfs_segctor_begin_finfo(sci, inode); |
570 | |
	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
572 | /* Substitution to vblocknr is delayed until update_blocknr() */ |
573 | nilfs_segbuf_add_file_buffer(segbuf, bh); |
574 | sci->sc_blk_cnt++; |
575 | failed: |
576 | return err; |
577 | } |
578 | |
579 | /* |
580 | * Callback functions that enumerate, mark, and collect dirty blocks |
581 | */ |
582 | static int nilfs_collect_file_data(struct nilfs_sc_info *sci, |
583 | struct buffer_head *bh, struct inode *inode) |
584 | { |
585 | int err; |
586 | |
587 | err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); |
588 | if (err < 0) |
589 | return err; |
590 | |
	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
593 | if (!err) |
594 | sci->sc_datablk_cnt++; |
595 | return err; |
596 | } |
597 | |
598 | static int nilfs_collect_file_node(struct nilfs_sc_info *sci, |
599 | struct buffer_head *bh, |
600 | struct inode *inode) |
601 | { |
602 | return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); |
603 | } |
604 | |
605 | static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci, |
606 | struct buffer_head *bh, |
607 | struct inode *inode) |
608 | { |
609 | WARN_ON(!buffer_dirty(bh)); |
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
611 | } |
612 | |
613 | static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci, |
614 | struct nilfs_segsum_pointer *ssp, |
615 | union nilfs_binfo *binfo) |
616 | { |
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
619 | *binfo_v = binfo->bi_v; |
620 | } |
621 | |
622 | static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci, |
623 | struct nilfs_segsum_pointer *ssp, |
624 | union nilfs_binfo *binfo) |
625 | { |
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
628 | *vblocknr = binfo->bi_v.bi_vblocknr; |
629 | } |
630 | |
631 | static const struct nilfs_sc_operations nilfs_sc_file_ops = { |
632 | .collect_data = nilfs_collect_file_data, |
633 | .collect_node = nilfs_collect_file_node, |
634 | .collect_bmap = nilfs_collect_file_bmap, |
635 | .write_data_binfo = nilfs_write_file_data_binfo, |
636 | .write_node_binfo = nilfs_write_file_node_binfo, |
637 | }; |
638 | |
639 | static int nilfs_collect_dat_data(struct nilfs_sc_info *sci, |
640 | struct buffer_head *bh, struct inode *inode) |
641 | { |
642 | int err; |
643 | |
644 | err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); |
645 | if (err < 0) |
646 | return err; |
647 | |
	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
649 | if (!err) |
650 | sci->sc_datablk_cnt++; |
651 | return err; |
652 | } |
653 | |
654 | static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci, |
655 | struct buffer_head *bh, struct inode *inode) |
656 | { |
657 | WARN_ON(!buffer_dirty(bh)); |
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
660 | } |
661 | |
662 | static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci, |
663 | struct nilfs_segsum_pointer *ssp, |
664 | union nilfs_binfo *binfo) |
665 | { |
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
668 | *blkoff = binfo->bi_dat.bi_blkoff; |
669 | } |
670 | |
671 | static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci, |
672 | struct nilfs_segsum_pointer *ssp, |
673 | union nilfs_binfo *binfo) |
674 | { |
675 | struct nilfs_binfo_dat *binfo_dat = |
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
677 | *binfo_dat = binfo->bi_dat; |
678 | } |
679 | |
680 | static const struct nilfs_sc_operations nilfs_sc_dat_ops = { |
681 | .collect_data = nilfs_collect_dat_data, |
682 | .collect_node = nilfs_collect_file_node, |
683 | .collect_bmap = nilfs_collect_dat_bmap, |
684 | .write_data_binfo = nilfs_write_dat_data_binfo, |
685 | .write_node_binfo = nilfs_write_dat_node_binfo, |
686 | }; |
687 | |
688 | static const struct nilfs_sc_operations nilfs_sc_dsync_ops = { |
689 | .collect_data = nilfs_collect_file_data, |
690 | .collect_node = NULL, |
691 | .collect_bmap = NULL, |
692 | .write_data_binfo = nilfs_write_file_data_binfo, |
693 | .write_node_binfo = NULL, |
694 | }; |
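/*
 * The operation tables above are chosen per log and per file (see
 * nilfs_segctor_update_payload_blocknr()): nilfs_sc_dsync_ops for data-sync
 * logs (SC_LSEG_DSYNC), nilfs_sc_dat_ops for blocks of the DAT metadata file,
 * and nilfs_sc_file_ops for all other files.
 */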
695 | |
696 | static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, |
697 | struct list_head *listp, |
698 | size_t nlimit, |
699 | loff_t start, loff_t end) |
700 | { |
701 | struct address_space *mapping = inode->i_mapping; |
702 | struct folio_batch fbatch; |
703 | pgoff_t index = 0, last = ULONG_MAX; |
704 | size_t ndirties = 0; |
705 | int i; |
706 | |
707 | if (unlikely(start != 0 || end != LLONG_MAX)) { |
708 | /* |
709 | * A valid range is given for sync-ing data pages. The |
710 | * range is rounded to per-page; extra dirty buffers |
711 | * may be included if blocksize < pagesize. |
712 | */ |
713 | index = start >> PAGE_SHIFT; |
714 | last = end >> PAGE_SHIFT; |
715 | } |
	folio_batch_init(&fbatch);
717 | repeat: |
718 | if (unlikely(index > last) || |
	    !filemap_get_folios_tag(mapping, &index, last,
			PAGECACHE_TAG_DIRTY, &fbatch))
721 | return ndirties; |
722 | |
	for (i = 0; i < folio_batch_count(&fbatch); i++) {
724 | struct buffer_head *bh, *head; |
725 | struct folio *folio = fbatch.folios[i]; |
726 | |
727 | folio_lock(folio); |
728 | if (unlikely(folio->mapping != mapping)) { |
729 | /* Exclude folios removed from the address space */ |
730 | folio_unlock(folio); |
731 | continue; |
732 | } |
733 | head = folio_buffers(folio); |
734 | if (!head) |
			head = create_empty_buffers(folio,
					i_blocksize(inode), 0);
737 | folio_unlock(folio); |
738 | |
739 | bh = head; |
740 | do { |
741 | if (!buffer_dirty(bh) || buffer_async_write(bh)) |
742 | continue; |
743 | get_bh(bh); |
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_batch_release(&fbatch);
748 | cond_resched(); |
749 | return ndirties; |
750 | } |
751 | } while (bh = bh->b_this_page, bh != head); |
752 | } |
	folio_batch_release(&fbatch);
754 | cond_resched(); |
755 | goto repeat; |
756 | } |
757 | |
758 | static void nilfs_lookup_dirty_node_buffers(struct inode *inode, |
759 | struct list_head *listp) |
760 | { |
761 | struct nilfs_inode_info *ii = NILFS_I(inode); |
762 | struct inode *btnc_inode = ii->i_assoc_inode; |
763 | struct folio_batch fbatch; |
764 | struct buffer_head *bh, *head; |
765 | unsigned int i; |
766 | pgoff_t index = 0; |
767 | |
768 | if (!btnc_inode) |
769 | return; |
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
775 | bh = head = folio_buffers(fbatch.folios[i]); |
776 | do { |
777 | if (buffer_dirty(bh) && |
778 | !buffer_async_write(bh)) { |
779 | get_bh(bh); |
				list_add_tail(&bh->b_assoc_buffers,
					      listp);
782 | } |
783 | bh = bh->b_this_page; |
784 | } while (bh != head); |
785 | } |
		folio_batch_release(&fbatch);
787 | cond_resched(); |
788 | } |
789 | } |
790 | |
791 | static void nilfs_dispose_list(struct the_nilfs *nilfs, |
792 | struct list_head *head, int force) |
793 | { |
794 | struct nilfs_inode_info *ii, *n; |
795 | struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii; |
796 | unsigned int nv = 0; |
797 | |
798 | while (!list_empty(head)) { |
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
811 | continue; |
812 | } |
813 | ivec[nv++] = ii; |
814 | if (nv == SC_N_INODEVEC) |
815 | break; |
816 | } |
		spin_unlock(&nilfs->ns_inode_lock);
818 | |
819 | for (pii = ivec; nv > 0; pii++, nv--) |
820 | iput(&(*pii)->vfs_inode); |
821 | } |
822 | } |
823 | |
824 | static void nilfs_iput_work_func(struct work_struct *work) |
825 | { |
826 | struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, |
827 | sc_iput_work); |
828 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
829 | |
	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
831 | } |
832 | |
833 | static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, |
834 | struct nilfs_root *root) |
835 | { |
836 | int ret = 0; |
837 | |
838 | if (nilfs_mdt_fetch_dirty(root->ifile)) |
839 | ret++; |
840 | if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile)) |
841 | ret++; |
842 | if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile)) |
843 | ret++; |
844 | if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat)) |
845 | ret++; |
846 | return ret; |
847 | } |
848 | |
849 | static int nilfs_segctor_clean(struct nilfs_sc_info *sci) |
850 | { |
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
855 | } |
856 | |
857 | static int nilfs_segctor_confirm(struct nilfs_sc_info *sci) |
858 | { |
859 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
860 | int ret = 0; |
861 | |
	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
870 | return ret; |
871 | } |
872 | |
873 | static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci) |
874 | { |
875 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
876 | |
	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
881 | } |
882 | |
883 | static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) |
884 | { |
885 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
886 | struct buffer_head *bh_cp; |
887 | struct nilfs_checkpoint *raw_cp; |
888 | int err; |
889 | |
890 | /* XXX: this interface will be changed */ |
891 | err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1, |
892 | &raw_cp, &bh_cp); |
893 | if (likely(!err)) { |
894 | /* |
895 | * The following code is duplicated with cpfile. But, it is |
896 | * needed to collect the checkpoint even if it was not newly |
897 | * created. |
898 | */ |
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
901 | nilfs_cpfile_put_checkpoint( |
902 | nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); |
903 | } else if (err == -EINVAL || err == -ENOENT) { |
904 | nilfs_error(sci->sc_super, |
			    "checkpoint creation failed due to metadata corruption.");
906 | err = -EIO; |
907 | } |
908 | return err; |
909 | } |
910 | |
911 | static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) |
912 | { |
913 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
914 | struct buffer_head *bh_cp; |
915 | struct nilfs_checkpoint *raw_cp; |
916 | int err; |
917 | |
918 | err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, |
919 | &raw_cp, &bh_cp); |
920 | if (unlikely(err)) { |
921 | if (err == -EINVAL || err == -ENOENT) { |
922 | nilfs_error(sci->sc_super, |
				    "checkpoint finalization failed due to metadata corruption.");
924 | err = -EIO; |
925 | } |
926 | goto failed_ibh; |
927 | } |
928 | raw_cp->cp_snapshot_list.ssl_next = 0; |
929 | raw_cp->cp_snapshot_list.ssl_prev = 0; |
930 | raw_cp->cp_inodes_count = |
931 | cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count)); |
932 | raw_cp->cp_blocks_count = |
933 | cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count)); |
934 | raw_cp->cp_nblk_inc = |
935 | cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); |
936 | raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); |
937 | raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno); |
938 | |
939 | if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) |
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);
943 | |
944 | nilfs_write_inode_common(sci->sc_root->ifile, |
945 | &raw_cp->cp_ifile_inode, 1); |
946 | nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); |
947 | return 0; |
948 | |
949 | failed_ibh: |
950 | return err; |
951 | } |
952 | |
953 | static void nilfs_fill_in_file_bmap(struct inode *ifile, |
954 | struct nilfs_inode_info *ii) |
955 | |
956 | { |
957 | struct buffer_head *ibh; |
958 | struct nilfs_inode *raw_inode; |
959 | |
960 | if (test_bit(NILFS_I_BMAP, &ii->i_state)) { |
961 | ibh = ii->i_bh; |
962 | BUG_ON(!ibh); |
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
967 | } |
968 | } |
969 | |
970 | static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci) |
971 | { |
972 | struct nilfs_inode_info *ii; |
973 | |
974 | list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) { |
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
977 | } |
978 | } |
979 | |
980 | static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, |
981 | struct the_nilfs *nilfs) |
982 | { |
983 | struct buffer_head *bh_sr; |
984 | struct nilfs_super_root *raw_sr; |
985 | unsigned int isz, srsz; |
986 | |
987 | bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root; |
988 | |
	lock_buffer(bh_sr);
990 | raw_sr = (struct nilfs_super_root *)bh_sr->b_data; |
991 | isz = nilfs->ns_inode_size; |
992 | srsz = NILFS_SR_BYTES(isz); |
993 | |
994 | raw_sr->sr_sum = 0; /* Ensure initialization within this update */ |
995 | raw_sr->sr_bytes = cpu_to_le16(srsz); |
996 | raw_sr->sr_nongc_ctime |
997 | = cpu_to_le64(nilfs_doing_gc() ? |
998 | nilfs->ns_nongc_ctime : sci->sc_seg_ctime); |
999 | raw_sr->sr_flags = 0; |
1000 | |
1001 | nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr + |
1002 | NILFS_SR_DAT_OFFSET(isz), 1); |
1003 | nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr + |
1004 | NILFS_SR_CPFILE_OFFSET(isz), 1); |
1005 | nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr + |
1006 | NILFS_SR_SUFILE_OFFSET(isz), 1); |
1007 | memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz); |
1008 | set_buffer_uptodate(bh_sr); |
	unlock_buffer(bh_sr);
1010 | } |
1011 | |
1012 | static void nilfs_redirty_inodes(struct list_head *head) |
1013 | { |
1014 | struct nilfs_inode_info *ii; |
1015 | |
1016 | list_for_each_entry(ii, head, i_dirty) { |
1017 | if (test_bit(NILFS_I_COLLECTED, &ii->i_state)) |
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
1019 | } |
1020 | } |
1021 | |
1022 | static void nilfs_drop_collected_inodes(struct list_head *head) |
1023 | { |
1024 | struct nilfs_inode_info *ii; |
1025 | |
1026 | list_for_each_entry(ii, head, i_dirty) { |
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
1032 | } |
1033 | } |
1034 | |
1035 | static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, |
1036 | struct inode *inode, |
1037 | struct list_head *listp, |
1038 | int (*collect)(struct nilfs_sc_info *, |
1039 | struct buffer_head *, |
1040 | struct inode *)) |
1041 | { |
1042 | struct buffer_head *bh, *n; |
1043 | int err = 0; |
1044 | |
1045 | if (collect) { |
1046 | list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) { |
			list_del_init(&bh->b_assoc_buffers);
1048 | err = collect(sci, bh, inode); |
1049 | brelse(bh); |
1050 | if (unlikely(err)) |
1051 | goto dispose_buffers; |
1052 | } |
1053 | return 0; |
1054 | } |
1055 | |
1056 | dispose_buffers: |
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
1061 | brelse(bh); |
1062 | } |
1063 | return err; |
1064 | } |
1065 | |
1066 | static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci) |
1067 | { |
1068 | /* Remaining number of blocks within segment buffer */ |
1069 | return sci->sc_segbuf_nblocks - |
1070 | (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks); |
1071 | } |
1072 | |
1073 | static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci, |
1074 | struct inode *inode, |
1075 | const struct nilfs_sc_operations *sc_ops) |
1076 | { |
1077 | LIST_HEAD(data_buffers); |
1078 | LIST_HEAD(node_buffers); |
1079 | int err; |
1080 | |
1081 | if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { |
1082 | size_t n, rest = nilfs_segctor_buffer_rest(sci); |
1083 | |
		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
1090 | BUG_ON(!err); /* always receive -E2BIG or true error */ |
1091 | goto break_or_fail; |
1092 | } |
1093 | } |
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
1103 | goto break_or_fail; |
1104 | } |
1105 | sci->sc_stage.flags |= NILFS_CF_NODE; |
1106 | } |
1107 | /* Collect node */ |
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
1116 | if (unlikely(err)) |
1117 | goto break_or_fail; |
1118 | |
1119 | nilfs_segctor_end_finfo(sci, inode); |
1120 | sci->sc_stage.flags &= ~NILFS_CF_NODE; |
1121 | |
1122 | break_or_fail: |
1123 | return err; |
1124 | } |
1125 | |
1126 | static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, |
1127 | struct inode *inode) |
1128 | { |
1129 | LIST_HEAD(data_buffers); |
1130 | size_t n, rest = nilfs_segctor_buffer_rest(sci); |
1131 | int err; |
1132 | |
	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
1139 | if (!err) { |
1140 | nilfs_segctor_end_finfo(sci, inode); |
1141 | BUG_ON(n > rest); |
1142 | /* always receive -E2BIG or true error if n > rest */ |
1143 | } |
1144 | return err; |
1145 | } |
1146 | |
1147 | static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) |
1148 | { |
1149 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
1150 | struct list_head *head; |
1151 | struct nilfs_inode_info *ii; |
1152 | size_t ndone; |
1153 | int err = 0; |
1154 | |
1155 | switch (nilfs_sc_cstage_get(sci)) { |
1156 | case NILFS_ST_INIT: |
1157 | /* Pre-processes */ |
1158 | sci->sc_stage.flags = 0; |
1159 | |
1160 | if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) { |
1161 | sci->sc_nblk_inc = 0; |
1162 | sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN; |
1163 | if (mode == SC_LSEG_DSYNC) { |
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1165 | goto dsync_mode; |
1166 | } |
1167 | } |
1168 | |
1169 | sci->sc_stage.dirty_file_ptr = NULL; |
1170 | sci->sc_stage.gc_inode_ptr = NULL; |
1171 | if (mode == SC_FLUSH_DAT) { |
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1173 | goto dat_stage; |
1174 | } |
1175 | nilfs_sc_cstage_inc(sci); |
1176 | fallthrough; |
1177 | case NILFS_ST_GC: |
1178 | if (nilfs_doing_gc()) { |
1179 | head = &sci->sc_gc_inodes; |
1180 | ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr, |
1181 | head, i_dirty); |
1182 | list_for_each_entry_continue(ii, head, i_dirty) { |
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
1186 | if (unlikely(err)) { |
1187 | sci->sc_stage.gc_inode_ptr = list_entry( |
1188 | ii->i_dirty.prev, |
1189 | struct nilfs_inode_info, |
1190 | i_dirty); |
1191 | goto break_or_fail; |
1192 | } |
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
1194 | } |
1195 | sci->sc_stage.gc_inode_ptr = NULL; |
1196 | } |
1197 | nilfs_sc_cstage_inc(sci); |
1198 | fallthrough; |
1199 | case NILFS_ST_FILE: |
1200 | head = &sci->sc_dirty_files; |
1201 | ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, |
1202 | i_dirty); |
1203 | list_for_each_entry_continue(ii, head, i_dirty) { |
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
1208 | if (unlikely(err)) { |
1209 | sci->sc_stage.dirty_file_ptr = |
1210 | list_entry(ii->i_dirty.prev, |
1211 | struct nilfs_inode_info, |
1212 | i_dirty); |
1213 | goto break_or_fail; |
1214 | } |
1215 | /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */ |
1216 | /* XXX: required ? */ |
1217 | } |
1218 | sci->sc_stage.dirty_file_ptr = NULL; |
1219 | if (mode == SC_FLUSH_FILE) { |
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1221 | return 0; |
1222 | } |
1223 | nilfs_sc_cstage_inc(sci); |
1224 | sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; |
1225 | fallthrough; |
1226 | case NILFS_ST_IFILE: |
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
1229 | if (unlikely(err)) |
1230 | break; |
1231 | nilfs_sc_cstage_inc(sci); |
1232 | /* Creating a checkpoint */ |
1233 | err = nilfs_segctor_create_checkpoint(sci); |
1234 | if (unlikely(err)) |
1235 | break; |
1236 | fallthrough; |
1237 | case NILFS_ST_CPFILE: |
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
1240 | if (unlikely(err)) |
1241 | break; |
1242 | nilfs_sc_cstage_inc(sci); |
1243 | fallthrough; |
1244 | case NILFS_ST_SUFILE: |
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
1251 | break; |
1252 | } |
1253 | sci->sc_stage.flags |= NILFS_CF_SUFREED; |
1254 | |
		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
1257 | if (unlikely(err)) |
1258 | break; |
1259 | nilfs_sc_cstage_inc(sci); |
1260 | fallthrough; |
1261 | case NILFS_ST_DAT: |
1262 | dat_stage: |
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
1265 | if (unlikely(err)) |
1266 | break; |
1267 | if (mode == SC_FLUSH_DAT) { |
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1269 | return 0; |
1270 | } |
1271 | nilfs_sc_cstage_inc(sci); |
1272 | fallthrough; |
1273 | case NILFS_ST_SR: |
1274 | if (mode == SC_LSEG_SR) { |
1275 | /* Appending a super root */ |
1276 | err = nilfs_segctor_add_super_root(sci); |
1277 | if (unlikely(err)) |
1278 | break; |
1279 | } |
1280 | /* End of a logical segment */ |
1281 | sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; |
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1283 | return 0; |
1284 | case NILFS_ST_DSYNC: |
1285 | dsync_mode: |
1286 | sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT; |
1287 | ii = sci->sc_dsync_inode; |
1288 | if (!test_bit(NILFS_I_BUSY, &ii->i_state)) |
1289 | break; |
1290 | |
		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1292 | if (unlikely(err)) |
1293 | break; |
1294 | sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; |
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1296 | return 0; |
1297 | case NILFS_ST_DONE: |
1298 | return 0; |
1299 | default: |
1300 | BUG(); |
1301 | } |
1302 | |
1303 | break_or_fail: |
1304 | return err; |
1305 | } |
1306 | |
1307 | /** |
1308 | * nilfs_segctor_begin_construction - setup segment buffer to make a new log |
1309 | * @sci: nilfs_sc_info |
1310 | * @nilfs: nilfs object |
1311 | */ |
1312 | static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, |
1313 | struct the_nilfs *nilfs) |
1314 | { |
1315 | struct nilfs_segment_buffer *segbuf, *prev; |
1316 | __u64 nextnum; |
1317 | int err, alloc = 0; |
1318 | |
1319 | segbuf = nilfs_segbuf_new(sci->sc_super); |
1320 | if (unlikely(!segbuf)) |
1321 | return -ENOMEM; |
1322 | |
	if (list_empty(&sci->sc_write_logs)) {
1324 | nilfs_segbuf_map(segbuf, nilfs->ns_segnum, |
1325 | nilfs->ns_pseg_offset, nilfs); |
1326 | if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { |
1327 | nilfs_shift_to_next_segment(nilfs); |
1328 | nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs); |
1329 | } |
1330 | |
1331 | segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq; |
1332 | nextnum = nilfs->ns_nextnum; |
1333 | |
1334 | if (nilfs->ns_segnum == nilfs->ns_nextnum) |
1335 | /* Start from the head of a new full segment */ |
1336 | alloc++; |
1337 | } else { |
1338 | /* Continue logs */ |
1339 | prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs); |
1340 | nilfs_segbuf_map_cont(segbuf, prev); |
1341 | segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq; |
1342 | nextnum = prev->sb_nextnum; |
1343 | |
1344 | if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { |
1345 | nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); |
1346 | segbuf->sb_sum.seg_seq++; |
1347 | alloc++; |
1348 | } |
1349 | } |
1350 | |
	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1352 | if (err) |
1353 | goto failed; |
1354 | |
1355 | if (alloc) { |
1356 | err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum); |
1357 | if (err) |
1358 | goto failed; |
1359 | } |
1360 | nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs); |
1361 | |
1362 | BUG_ON(!list_empty(&sci->sc_segbufs)); |
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1364 | sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks; |
1365 | return 0; |
1366 | |
1367 | failed: |
1368 | nilfs_segbuf_free(segbuf); |
1369 | return err; |
1370 | } |
1371 | |
1372 | static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci, |
1373 | struct the_nilfs *nilfs, int nadd) |
1374 | { |
1375 | struct nilfs_segment_buffer *segbuf, *prev; |
1376 | struct inode *sufile = nilfs->ns_sufile; |
1377 | __u64 nextnextnum; |
1378 | LIST_HEAD(list); |
1379 | int err, ret, i; |
1380 | |
1381 | prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs); |
1382 | /* |
1383 | * Since the segment specified with nextnum might be allocated during |
1384 | * the previous construction, the buffer including its segusage may |
1385 | * not be dirty. The following call ensures that the buffer is dirty |
1386 | * and will pin the buffer on memory until the sufile is written. |
1387 | */ |
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1389 | if (unlikely(err)) |
1390 | return err; |
1391 | |
1392 | for (i = 0; i < nadd; i++) { |
1393 | /* extend segment info */ |
1394 | err = -ENOMEM; |
1395 | segbuf = nilfs_segbuf_new(sci->sc_super); |
1396 | if (unlikely(!segbuf)) |
1397 | goto failed; |
1398 | |
1399 | /* map this buffer to region of segment on-disk */ |
1400 | nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); |
1401 | sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks; |
1402 | |
1403 | /* allocate the next next full segment */ |
1404 | err = nilfs_sufile_alloc(sufile, &nextnextnum); |
1405 | if (unlikely(err)) |
1406 | goto failed_segbuf; |
1407 | |
1408 | segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1; |
1409 | nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs); |
1410 | |
		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
1415 | return 0; |
1416 | |
1417 | failed_segbuf: |
1418 | nilfs_segbuf_free(segbuf); |
1419 | failed: |
1420 | list_for_each_entry(segbuf, &list, sb_list) { |
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
1425 | return err; |
1426 | } |
1427 | |
1428 | static void nilfs_free_incomplete_logs(struct list_head *logs, |
1429 | struct the_nilfs *nilfs) |
1430 | { |
1431 | struct nilfs_segment_buffer *segbuf, *prev; |
1432 | struct inode *sufile = nilfs->ns_sufile; |
1433 | int ret; |
1434 | |
1435 | segbuf = NILFS_FIRST_SEGBUF(logs); |
1436 | if (nilfs->ns_nextnum != segbuf->sb_nextnum) { |
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
1441 | /* Case 1: The first segment failed */ |
1442 | if (segbuf->sb_pseg_start != segbuf->sb_fseg_start) |
1443 | /* |
1444 | * Case 1a: Partial segment appended into an existing |
1445 | * segment |
1446 | */ |
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
1449 | else /* Case 1b: New full segment */ |
1450 | set_nilfs_discontinued(nilfs); |
1451 | } |
1452 | |
1453 | prev = segbuf; |
1454 | list_for_each_entry_continue(segbuf, logs, sb_list) { |
1455 | if (prev->sb_nextnum != segbuf->sb_nextnum) { |
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1463 | prev = segbuf; |
1464 | } |
1465 | } |
1466 | |
1467 | static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci, |
1468 | struct inode *sufile) |
1469 | { |
1470 | struct nilfs_segment_buffer *segbuf; |
1471 | unsigned long live_blocks; |
1472 | int ret; |
1473 | |
1474 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { |
1475 | live_blocks = segbuf->sb_sum.nblocks + |
1476 | (segbuf->sb_pseg_start - segbuf->sb_fseg_start); |
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
1480 | WARN_ON(ret); /* always succeed because the segusage is dirty */ |
1481 | } |
1482 | } |
1483 | |
1484 | static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile) |
1485 | { |
1486 | struct nilfs_segment_buffer *segbuf; |
1487 | int ret; |
1488 | |
1489 | segbuf = NILFS_FIRST_SEGBUF(logs); |
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
1493 | WARN_ON(ret); /* always succeed because the segusage is dirty */ |
1494 | |
1495 | list_for_each_entry_continue(segbuf, logs, sb_list) { |
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
1498 | WARN_ON(ret); /* always succeed */ |
1499 | } |
1500 | } |
1501 | |
1502 | static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci, |
1503 | struct nilfs_segment_buffer *last, |
1504 | struct inode *sufile) |
1505 | { |
1506 | struct nilfs_segment_buffer *segbuf = last; |
1507 | int ret; |
1508 | |
1509 | list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) { |
1510 | sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks; |
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
1515 | } |
1516 | |
1517 | |
1518 | static int nilfs_segctor_collect(struct nilfs_sc_info *sci, |
1519 | struct the_nilfs *nilfs, int mode) |
1520 | { |
1521 | struct nilfs_cstage prev_stage = sci->sc_stage; |
1522 | int err, nadd = 1; |
1523 | |
1524 | /* Collection retry loop */ |
1525 | for (;;) { |
1526 | sci->sc_nblk_this_inc = 0; |
1527 | sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs); |
1528 | |
1529 | err = nilfs_segctor_reset_segment_buffer(sci); |
1530 | if (unlikely(err)) |
1531 | goto failed; |
1532 | |
1533 | err = nilfs_segctor_collect_blocks(sci, mode); |
1534 | sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; |
1535 | if (!err) |
1536 | break; |
1537 | |
1538 | if (unlikely(err != -E2BIG)) |
1539 | goto failed; |
1540 | |
1541 | /* The current segment is filled up */ |
1542 | if (mode != SC_LSEG_SR || |
1543 | nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE) |
1544 | break; |
1545 | |
		nilfs_clear_logs(&sci->sc_segbufs);
1547 | |
1548 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
1553 | WARN_ON(err); /* do not happen */ |
1554 | sci->sc_stage.flags &= ~NILFS_CF_SUFREED; |
1555 | } |
1556 | |
1557 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); |
1558 | if (unlikely(err)) |
1559 | return err; |
1560 | |
1561 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); |
1562 | sci->sc_stage = prev_stage; |
1563 | } |
1564 | nilfs_segctor_zeropad_segsum(sci); |
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1566 | return 0; |
1567 | |
1568 | failed: |
1569 | return err; |
1570 | } |
1571 | |
1572 | static void nilfs_list_replace_buffer(struct buffer_head *old_bh, |
1573 | struct buffer_head *new_bh) |
1574 | { |
1575 | BUG_ON(!list_empty(&new_bh->b_assoc_buffers)); |
1576 | |
	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1578 | /* The caller must release old_bh */ |
1579 | } |
1580 | |
1581 | static int |
1582 | nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, |
1583 | struct nilfs_segment_buffer *segbuf, |
1584 | int mode) |
1585 | { |
1586 | struct inode *inode = NULL; |
1587 | sector_t blocknr; |
1588 | unsigned long nfinfo = segbuf->sb_sum.nfinfo; |
1589 | unsigned long nblocks = 0, ndatablk = 0; |
1590 | const struct nilfs_sc_operations *sc_op = NULL; |
1591 | struct nilfs_segsum_pointer ssp; |
1592 | struct nilfs_finfo *finfo = NULL; |
1593 | union nilfs_binfo binfo; |
1594 | struct buffer_head *bh, *bh_org; |
1595 | ino_t ino = 0; |
1596 | int err = 0; |
1597 | |
1598 | if (!nfinfo) |
1599 | goto out; |
1600 | |
1601 | blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk; |
1602 | ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); |
1603 | ssp.offset = sizeof(struct nilfs_segment_summary); |
1604 | |
1605 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { |
1606 | if (bh == segbuf->sb_super_root) |
1607 | break; |
1608 | if (!finfo) { |
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
1611 | ino = le64_to_cpu(finfo->fi_ino); |
1612 | nblocks = le32_to_cpu(finfo->fi_nblocks); |
1613 | ndatablk = le32_to_cpu(finfo->fi_ndatablk); |
1614 | |
1615 | inode = bh->b_folio->mapping->host; |
1616 | |
1617 | if (mode == SC_LSEG_DSYNC) |
1618 | sc_op = &nilfs_sc_dsync_ops; |
1619 | else if (ino == NILFS_DAT_INO) |
1620 | sc_op = &nilfs_sc_dat_ops; |
1621 | else /* file blocks */ |
1622 | sc_op = &nilfs_sc_file_ops; |
1623 | } |
1624 | bh_org = bh; |
1625 | get_bh(bh_org); |
1626 | err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr, |
1627 | &binfo); |
1628 | if (bh != bh_org) |
1629 | nilfs_list_replace_buffer(bh_org, bh); |
1630 | brelse(bh_org); |
1631 | if (unlikely(err)) |
1632 | goto failed_bmap; |
1633 | |
1634 | if (ndatablk > 0) |
1635 | sc_op->write_data_binfo(sci, &ssp, &binfo); |
1636 | else |
1637 | sc_op->write_node_binfo(sci, &ssp, &binfo); |
1638 | |
1639 | blocknr++; |
1640 | if (--nblocks == 0) { |
1641 | finfo = NULL; |
1642 | if (--nfinfo == 0) |
1643 | break; |
1644 | } else if (ndatablk > 0) |
1645 | ndatablk--; |
1646 | } |
1647 | out: |
1648 | return 0; |
1649 | |
1650 | failed_bmap: |
1651 | return err; |
1652 | } |
1653 | |
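/**
 * nilfs_segctor_assign - finalize block addresses and segment summaries
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Assigns disk block addresses to the payload of every segment buffer
 * and fills in the remaining fields of their segment summaries.
 */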
1654 | static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) |
1655 | { |
1656 | struct nilfs_segment_buffer *segbuf; |
1657 | int err; |
1658 | |
1659 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { |
1660 | err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode); |
1661 | if (unlikely(err)) |
1662 | return err; |
1663 | nilfs_segbuf_fill_in_segsum(segbuf); |
1664 | } |
1665 | return 0; |
1666 | } |
1667 | |
1668 | static void nilfs_begin_page_io(struct page *page) |
1669 | { |
1670 | if (!page || PageWriteback(page)) |
1671 | /* |
1672 | * For split b-tree node pages, this function may be called |
1673 | * twice. We ignore the 2nd or later calls by this check. |
1674 | */ |
1675 | return; |
1676 | |
1677 | lock_page(page); |
1678 | clear_page_dirty_for_io(page); |
1679 | set_page_writeback(page); |
1680 | unlock_page(page); |
1681 | } |
1682 | |
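/**
 * nilfs_segctor_prepare_write - put pages of collected buffers under writeback
 * @sci: segment constructor object
 *
 * Sets the writeback flag on the pages backing the segment summary and
 * payload buffers of all segment buffers, and marks payload buffers for
 * asynchronous write.
 */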
1683 | static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) |
1684 | { |
1685 | struct nilfs_segment_buffer *segbuf; |
1686 | struct page *bd_page = NULL, *fs_page = NULL; |
1687 | |
1688 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { |
1689 | struct buffer_head *bh; |
1690 | |
1691 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, |
1692 | b_assoc_buffers) { |
1693 | if (bh->b_page != bd_page) { |
1694 | if (bd_page) { |
1695 | lock_page(bd_page); |
1696 | clear_page_dirty_for_io(bd_page); |
1697 | set_page_writeback(bd_page); |
1698 | unlock_page(bd_page); |
1699 | } |
1700 | bd_page = bh->b_page; |
1701 | } |
1702 | } |
1703 | |
1704 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
1705 | b_assoc_buffers) { |
1706 | set_buffer_async_write(bh); |
1707 | if (bh == segbuf->sb_super_root) { |
1708 | if (bh->b_page != bd_page) { |
1709 | lock_page(bd_page); |
1710 | clear_page_dirty_for_io(bd_page); |
1711 | set_page_writeback(bd_page); |
1712 | unlock_page(bd_page); |
1713 | bd_page = bh->b_page; |
1714 | } |
1715 | break; |
1716 | } |
1717 | if (bh->b_page != fs_page) { |
1718 | nilfs_begin_page_io(fs_page); |
1719 | fs_page = bh->b_page; |
1720 | } |
1721 | } |
1722 | } |
1723 | if (bd_page) { |
1724 | lock_page(bd_page); |
1725 | clear_page_dirty_for_io(bd_page); |
1726 | set_page_writeback(bd_page); |
1727 | unlock_page(bd_page); |
1728 | } |
1729 | nilfs_begin_page_io(fs_page); |
1730 | } |
1731 | |
1732 | static int nilfs_segctor_write(struct nilfs_sc_info *sci, |
1733 | struct the_nilfs *nilfs) |
1734 | { |
1735 | int ret; |
1736 | |
1737 | ret = nilfs_write_logs(&sci->sc_segbufs, nilfs); |
1738 | list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); |
1739 | return ret; |
1740 | } |
1741 | |
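/**
 * nilfs_end_page_io - finish page-level writeback of a written page
 * @page: page to complete, or NULL
 * @err: I/O error code, or 0 on success
 *
 * For b-tree node pages, which may be visited more than once, only the
 * page dirty flag is reconciled with the buffer state; for other pages
 * the dirty flag is restored when needed and writeback is ended.
 */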
1742 | static void nilfs_end_page_io(struct page *page, int err) |
1743 | { |
1744 | if (!page) |
1745 | return; |
1746 | |
1747 | if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) { |
1748 | /* |
1749 | * For b-tree node pages, this function may be called twice |
1750 | * or more because they might be split in a segment. |
1751 | */ |
1752 | if (PageDirty(page)) { |
1753 | /* |
1754 | * For pages holding split b-tree node buffers, dirty |
1755 | * flag on the buffers may be cleared discretely. |
1756 | * In that case, the page is once redirtied for |
1757 | * remaining buffers, and it must be cancelled if |
1758 | * all the buffers get cleaned later. |
1759 | */ |
1760 | lock_page(page); |
1761 | if (nilfs_page_buffers_clean(page)) |
1762 | __nilfs_clear_page_dirty(page); |
1763 | unlock_page(page); |
1764 | } |
1765 | return; |
1766 | } |
1767 | |
1768 | if (!err) { |
1769 | if (!nilfs_page_buffers_clean(page)) |
1770 | __set_page_dirty_nobuffers(page); |
1771 | ClearPageError(page); |
1772 | } else { |
1773 | __set_page_dirty_nobuffers(page); |
1774 | SetPageError(page); |
1775 | } |
1776 | |
1777 | end_page_writeback(page); |
1778 | } |
1779 | |
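/**
 * nilfs_abort_logs - clean up buffers and pages of logs that failed to write
 * @logs: list of segment buffers to abort
 * @err: I/O error code propagated to page-level completion
 *
 * Clears the uptodate flag of segment summary and super root buffers,
 * clears the async-write flag of payload buffers, and ends writeback on
 * the affected pages with @err.
 */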
1780 | static void nilfs_abort_logs(struct list_head *logs, int err) |
1781 | { |
1782 | struct nilfs_segment_buffer *segbuf; |
1783 | struct page *bd_page = NULL, *fs_page = NULL; |
1784 | struct buffer_head *bh; |
1785 | |
1786 | if (list_empty(logs)) |
1787 | return; |
1788 | |
1789 | list_for_each_entry(segbuf, logs, sb_list) { |
1790 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, |
1791 | b_assoc_buffers) { |
1792 | clear_buffer_uptodate(bh); |
1793 | if (bh->b_page != bd_page) { |
1794 | if (bd_page) |
1795 | end_page_writeback(bd_page); |
1796 | bd_page = bh->b_page; |
1797 | } |
1798 | } |
1799 | |
1800 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
1801 | b_assoc_buffers) { |
1802 | clear_buffer_async_write(bh); |
1803 | if (bh == segbuf->sb_super_root) { |
1804 | clear_buffer_uptodate(bh); |
1805 | if (bh->b_page != bd_page) { |
1806 | end_page_writeback(bd_page); |
1807 | bd_page = bh->b_page; |
1808 | } |
1809 | break; |
1810 | } |
1811 | if (bh->b_page != fs_page) { |
1812 | nilfs_end_page_io(fs_page, err); |
1813 | fs_page = bh->b_page; |
1814 | } |
1815 | } |
1816 | } |
1817 | if (bd_page) |
1818 | end_page_writeback(bd_page); |
1819 | |
1820 | nilfs_end_page_io(fs_page, err); |
1821 | } |
1822 | |
1823 | static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, |
1824 | struct the_nilfs *nilfs, int err) |
1825 | { |
1826 | LIST_HEAD(logs); |
1827 | int ret; |
1828 | |
1829 | list_splice_tail_init(&sci->sc_write_logs, &logs); |
1830 | ret = nilfs_wait_on_logs(&logs); |
1831 | nilfs_abort_logs(&logs, ret ? : err); |
1832 | |
1833 | list_splice_tail_init(&sci->sc_segbufs, &logs); |
1834 | nilfs_cancel_segusage(&logs, nilfs->ns_sufile); |
1835 | nilfs_free_incomplete_logs(&logs, nilfs); |
1836 | |
1837 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
1838 | ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile, |
1839 | sci->sc_freesegs, |
1840 | sci->sc_nfreesegs, |
1841 | NULL); |
1842 | WARN_ON(ret); /* do not happen */ |
1843 | } |
1844 | |
1845 | nilfs_destroy_logs(&logs); |
1846 | } |
1847 | |
1848 | static void nilfs_set_next_segment(struct the_nilfs *nilfs, |
1849 | struct nilfs_segment_buffer *segbuf) |
1850 | { |
1851 | nilfs->ns_segnum = segbuf->sb_segnum; |
1852 | nilfs->ns_nextnum = segbuf->sb_nextnum; |
1853 | nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start |
1854 | + segbuf->sb_sum.nblocks; |
1855 | nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq; |
1856 | nilfs->ns_ctime = segbuf->sb_sum.ctime; |
1857 | } |
1858 | |
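/**
 * nilfs_segctor_complete_write - finish construction after a successful write
 * @sci: segment constructor object
 *
 * Marks the written buffers up to date and clean, ends writeback on
 * their pages, records the position of the next segment, and, if a super
 * root was written, advances the checkpoint number and clears the dirty
 * state of the segment constructor.
 */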
1859 | static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) |
1860 | { |
1861 | struct nilfs_segment_buffer *segbuf; |
1862 | struct page *bd_page = NULL, *fs_page = NULL; |
1863 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
1864 | int update_sr = false; |
1865 | |
1866 | list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { |
1867 | struct buffer_head *bh; |
1868 | |
1869 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, |
1870 | b_assoc_buffers) { |
1871 | set_buffer_uptodate(bh); |
1872 | clear_buffer_dirty(bh); |
1873 | if (bh->b_page != bd_page) { |
1874 | if (bd_page) |
1875 | end_page_writeback(bd_page); |
1876 | bd_page = bh->b_page; |
1877 | } |
1878 | } |
1879 | /* |
1880 | * We assume that the buffers which belong to the same page |
1881 | * continue over the buffer list. |
1882 | * Under this assumption, the last BHs of pages is |
1883 | * identifiable by the discontinuity of bh->b_page |
1884 | * (page != fs_page). |
1885 | * |
1886 | * For B-tree node blocks, however, this assumption is not |
1887 | * guaranteed. The cleanup code of B-tree node pages needs |
1888 | * special care. |
1889 | */ |
1890 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, |
1891 | b_assoc_buffers) { |
1892 | const unsigned long set_bits = BIT(BH_Uptodate); |
1893 | const unsigned long clear_bits = |
1894 | (BIT(BH_Dirty) | BIT(BH_Async_Write) | |
1895 | BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | |
1896 | BIT(BH_NILFS_Redirected)); |
1897 | |
1898 | set_mask_bits(&bh->b_state, clear_bits, set_bits); |
1899 | if (bh == segbuf->sb_super_root) { |
1900 | if (bh->b_page != bd_page) { |
1901 | end_page_writeback(bd_page); |
1902 | bd_page = bh->b_page; |
1903 | } |
1904 | update_sr = true; |
1905 | break; |
1906 | } |
1907 | if (bh->b_page != fs_page) { |
1908 | nilfs_end_page_io(fs_page, 0); |
1909 | fs_page = bh->b_page; |
1910 | } |
1911 | } |
1912 | |
1913 | if (!nilfs_segbuf_simplex(segbuf)) { |
1914 | if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) { |
1915 | set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); |
1916 | sci->sc_lseg_stime = jiffies; |
1917 | } |
1918 | if (segbuf->sb_sum.flags & NILFS_SS_LOGEND) |
1919 | clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); |
1920 | } |
1921 | } |
1922 | /* |
1923 | * Since pages may continue over multiple segment buffers, |
1924 | * end of the last page must be checked outside of the loop. |
1925 | */ |
1926 | if (bd_page) |
1927 | end_page_writeback(bd_page); |
1928 | |
1929 | nilfs_end_page_io(fs_page, 0); |
1930 | |
1931 | nilfs_drop_collected_inodes(&sci->sc_dirty_files); |
1932 | |
1933 | if (nilfs_doing_gc()) |
1934 | nilfs_drop_collected_inodes(&sci->sc_gc_inodes); |
1935 | else |
1936 | nilfs->ns_nongc_ctime = sci->sc_seg_ctime; |
1937 | |
1938 | sci->sc_nblk_inc += sci->sc_nblk_this_inc; |
1939 | |
1940 | segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs); |
1941 | nilfs_set_next_segment(nilfs, segbuf); |
1942 | |
1943 | if (update_sr) { |
1944 | nilfs->ns_flushed_device = 0; |
1945 | nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, |
1946 | segbuf->sb_sum.seg_seq, nilfs->ns_cno++); |
1947 | |
1948 | clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); |
1949 | clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); |
1950 | set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); |
1951 | nilfs_segctor_clear_metadata_dirty(sci); |
1952 | } else |
1953 | clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); |
1954 | } |
1955 | |
1956 | static int nilfs_segctor_wait(struct nilfs_sc_info *sci) |
1957 | { |
1958 | int ret; |
1959 | |
1960 | ret = nilfs_wait_on_logs(&sci->sc_write_logs); |
1961 | if (!ret) { |
1962 | nilfs_segctor_complete_write(sci); |
1963 | nilfs_destroy_logs(&sci->sc_write_logs); |
1964 | } |
1965 | return ret; |
1966 | } |
1967 | |
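/**
 * nilfs_segctor_collect_dirty_files - take over dirty inodes for writing
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Makes sure every inode queued on the ns_dirty_files list has its ifile
 * inode block read in and redirtied, then moves the inode to the
 * constructor's sc_dirty_files list, switching it from the queued to the
 * busy state.
 */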
1968 | static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, |
1969 | struct the_nilfs *nilfs) |
1970 | { |
1971 | struct nilfs_inode_info *ii, *n; |
1972 | struct inode *ifile = sci->sc_root->ifile; |
1973 | |
1974 | spin_lock(&nilfs->ns_inode_lock); |
1975 | retry: |
1976 | list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) { |
1977 | if (!ii->i_bh) { |
1978 | struct buffer_head *ibh; |
1979 | int err; |
1980 | |
1981 | spin_unlock(&nilfs->ns_inode_lock); |
1982 | err = nilfs_ifile_get_inode_block( |
1983 | ifile, ii->vfs_inode.i_ino, &ibh); |
1984 | if (unlikely(err)) { |
1985 | nilfs_warn(sci->sc_super, |
1986 | "log writer: error %d getting inode block (ino=%lu)", |
1987 | err, ii->vfs_inode.i_ino); |
1988 | return err; |
1989 | } |
1990 | spin_lock(&nilfs->ns_inode_lock); |
1991 | if (likely(!ii->i_bh)) |
1992 | ii->i_bh = ibh; |
1993 | else |
1994 | brelse(ibh); |
1995 | goto retry; |
1996 | } |
1997 | |
1998 | // Always redirty the buffer to avoid race condition |
1999 | mark_buffer_dirty(ii->i_bh); |
2000 | nilfs_mdt_mark_dirty(ifile); |
2001 | |
2002 | clear_bit(NILFS_I_QUEUED, &ii->i_state); |
2003 | set_bit(NILFS_I_BUSY, &ii->i_state); |
2004 | list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); |
2005 | } |
2006 | spin_unlock(&nilfs->ns_inode_lock); |
2007 | |
2008 | return 0; |
2009 | } |
2010 | |
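/**
 * nilfs_segctor_drop_written_files - release inodes written out by the log
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Detaches fully updated inodes from the sc_dirty_files list and drops
 * the reference to their ifile block.  iput() is deferred to the iput
 * work queue for unlinked inodes or while the mount has not finished,
 * to avoid deadlocks.
 */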
2011 | static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, |
2012 | struct the_nilfs *nilfs) |
2013 | { |
2014 | struct nilfs_inode_info *ii, *n; |
2015 | int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE); |
2016 | int defer_iput = false; |
2017 | |
2018 | spin_lock(&nilfs->ns_inode_lock); |
2019 | list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { |
2020 | if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) || |
2021 | test_bit(NILFS_I_DIRTY, &ii->i_state)) |
2022 | continue; |
2023 | |
2024 | clear_bit(NILFS_I_BUSY, &ii->i_state); |
2025 | brelse(ii->i_bh); |
2026 | ii->i_bh = NULL; |
2027 | list_del_init(&ii->i_dirty); |
2028 | if (!ii->vfs_inode.i_nlink || during_mount) { |
2029 | /* |
2030 | * Defer calling iput() to avoid deadlocks if |
2031 | * i_nlink == 0 or mount is not yet finished. |
2032 | */ |
2033 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); |
2034 | defer_iput = true; |
2035 | } else { |
2036 | spin_unlock(&nilfs->ns_inode_lock); |
2037 | iput(&ii->vfs_inode); |
2038 | spin_lock(&nilfs->ns_inode_lock); |
2039 | } |
2040 | } |
2041 | spin_unlock(&nilfs->ns_inode_lock); |
2042 | |
2043 | if (defer_iput) |
2044 | schedule_work(&sci->sc_iput_work); |
2045 | } |
2046 | |
2047 | /* |
2048 | * Main procedure of segment constructor |
2049 | */ |
2050 | static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) |
2051 | { |
2052 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
2053 | int err; |
2054 | |
2055 | if (sb_rdonly(sci->sc_super)) |
2056 | return -EROFS; |
2057 | |
2058 | nilfs_sc_cstage_set(sci, NILFS_ST_INIT); |
2059 | sci->sc_cno = nilfs->ns_cno; |
2060 | |
2061 | err = nilfs_segctor_collect_dirty_files(sci, nilfs); |
2062 | if (unlikely(err)) |
2063 | goto out; |
2064 | |
2065 | if (nilfs_test_metadata_dirty(nilfs, sci->sc_root)) |
2066 | set_bit(NILFS_SC_DIRTY, &sci->sc_flags); |
2067 | |
2068 | if (nilfs_segctor_clean(sci)) |
2069 | goto out; |
2070 | |
2071 | do { |
2072 | sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK; |
2073 | |
2074 | err = nilfs_segctor_begin_construction(sci, nilfs); |
2075 | if (unlikely(err)) |
2076 | goto out; |
2077 | |
2078 | /* Update time stamp */ |
2079 | sci->sc_seg_ctime = ktime_get_real_seconds(); |
2080 | |
2081 | err = nilfs_segctor_collect(sci, nilfs, mode); |
2082 | if (unlikely(err)) |
2083 | goto failed; |
2084 | |
2085 | /* Avoid empty segment */ |
2086 | if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE && |
2087 | nilfs_segbuf_empty(sci->sc_curseg)) { |
2088 | nilfs_segctor_abort_construction(sci, nilfs, 1); |
2089 | goto out; |
2090 | } |
2091 | |
2092 | err = nilfs_segctor_assign(sci, mode); |
2093 | if (unlikely(err)) |
2094 | goto failed; |
2095 | |
2096 | if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) |
2097 | nilfs_segctor_fill_in_file_bmap(sci); |
2098 | |
2099 | if (mode == SC_LSEG_SR && |
2100 | nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) { |
2101 | err = nilfs_segctor_fill_in_checkpoint(sci); |
2102 | if (unlikely(err)) |
2103 | goto failed_to_write; |
2104 | |
2105 | nilfs_segctor_fill_in_super_root(sci, nilfs); |
2106 | } |
2107 | nilfs_segctor_update_segusage(sci, nilfs->ns_sufile); |
2108 | |
2109 | /* Write partial segments */ |
2110 | nilfs_segctor_prepare_write(sci); |
2111 | |
2112 | nilfs_add_checksums_on_logs(&sci->sc_segbufs, |
2113 | nilfs->ns_crc_seed); |
2114 | |
2115 | err = nilfs_segctor_write(sci, nilfs); |
2116 | if (unlikely(err)) |
2117 | goto failed_to_write; |
2118 | |
2119 | if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || |
2120 | nilfs->ns_blocksize_bits != PAGE_SHIFT) { |
2121 | /* |
2122 | * At this point, we avoid double buffering |
2123 | * for blocksize < pagesize because page dirty |
2124 | * flag is turned off during write and dirty |
2125 | * buffers are not properly collected for |
2126 | * pages crossing over segments. |
2127 | */ |
2128 | err = nilfs_segctor_wait(sci); |
2129 | if (err) |
2130 | goto failed_to_write; |
2131 | } |
2132 | } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE); |
2133 | |
2134 | out: |
2135 | nilfs_segctor_drop_written_files(sci, nilfs); |
2136 | return err; |
2137 | |
2138 | failed_to_write: |
2139 | if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) |
2140 | nilfs_redirty_inodes(&sci->sc_dirty_files); |
2141 | |
2142 | failed: |
2143 | if (nilfs_doing_gc()) |
2144 | nilfs_redirty_inodes(&sci->sc_gc_inodes); |
2145 | nilfs_segctor_abort_construction(sci, nilfs, err); |
2146 | goto out; |
2147 | } |
2148 | |
2149 | /** |
2150 | * nilfs_segctor_start_timer - set timer of background write |
2151 | * @sci: nilfs_sc_info |
2152 | * |
2153 | * If the timer has already been set, it ignores the new request. |
2154 | * This function MUST be called within a section locking the segment |
2155 | * semaphore. |
2156 | */ |
2157 | static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci) |
2158 | { |
2159 | spin_lock(&sci->sc_state_lock); |
2160 | if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) { |
2161 | sci->sc_timer.expires = jiffies + sci->sc_interval; |
2162 | add_timer(&sci->sc_timer); |
2163 | sci->sc_state |= NILFS_SEGCTOR_COMMIT; |
2164 | } |
2165 | spin_unlock(&sci->sc_state_lock); |
2166 | } |
2167 | |
2168 | static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) |
2169 | { |
2170 | spin_lock(&sci->sc_state_lock); |
2171 | if (!(sci->sc_flush_request & BIT(bn))) { |
2172 | unsigned long prev_req = sci->sc_flush_request; |
2173 | |
2174 | sci->sc_flush_request |= BIT(bn); |
2175 | if (!prev_req) |
2176 | wake_up(&sci->sc_wait_daemon); |
2177 | } |
2178 | spin_unlock(&sci->sc_state_lock); |
2179 | } |
2180 | |
2181 | /** |
2182 | * nilfs_flush_segment - trigger a segment construction for resource control |
2183 | * @sb: super block |
2184 | * @ino: inode number of the file to be flushed out. |
2185 | */ |
2186 | void nilfs_flush_segment(struct super_block *sb, ino_t ino) |
2187 | { |
2188 | struct the_nilfs *nilfs = sb->s_fs_info; |
2189 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
2190 | |
2191 | if (!sci || nilfs_doing_construction()) |
2192 | return; |
2193 | nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0); |
2194 | /* assign bit 0 to data files */ |
2195 | } |
2196 | |
2197 | struct nilfs_segctor_wait_request { |
2198 | wait_queue_entry_t wq; |
2199 | __u32 seq; |
2200 | int err; |
2201 | atomic_t done; |
2202 | }; |
2203 | |
2204 | static int nilfs_segctor_sync(struct nilfs_sc_info *sci) |
2205 | { |
2206 | struct nilfs_segctor_wait_request wait_req; |
2207 | int err = 0; |
2208 | |
2209 | spin_lock(&sci->sc_state_lock); |
2210 | init_wait(&wait_req.wq); |
2211 | wait_req.err = 0; |
2212 | atomic_set(&wait_req.done, 0); |
2213 | wait_req.seq = ++sci->sc_seq_request; |
2214 | spin_unlock(&sci->sc_state_lock); |
2215 | |
2216 | init_waitqueue_entry(&wait_req.wq, current); |
2217 | add_wait_queue(&sci->sc_wait_request, &wait_req.wq); |
2218 | set_current_state(TASK_INTERRUPTIBLE); |
2219 | wake_up(&sci->sc_wait_daemon); |
2220 | |
2221 | for (;;) { |
2222 | if (atomic_read(&wait_req.done)) { |
2223 | err = wait_req.err; |
2224 | break; |
2225 | } |
2226 | if (!signal_pending(current)) { |
2227 | schedule(); |
2228 | continue; |
2229 | } |
2230 | err = -ERESTARTSYS; |
2231 | break; |
2232 | } |
2233 | finish_wait(&sci->sc_wait_request, &wait_req.wq); |
2234 | return err; |
2235 | } |
2236 | |
2237 | static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err) |
2238 | { |
2239 | struct nilfs_segctor_wait_request *wrq, *n; |
2240 | unsigned long flags; |
2241 | |
2242 | spin_lock_irqsave(&sci->sc_wait_request.lock, flags); |
2243 | list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { |
2244 | if (!atomic_read(&wrq->done) && |
2245 | nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) { |
2246 | wrq->err = err; |
2247 | atomic_set(&wrq->done, 1); |
2248 | } |
2249 | if (atomic_read(&wrq->done)) { |
2250 | wrq->wq.func(&wrq->wq, |
2251 | TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, |
2252 | 0, NULL); |
2253 | } |
2254 | } |
2255 | spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags); |
2256 | } |
2257 | |
2258 | /** |
2259 | * nilfs_construct_segment - construct a logical segment |
2260 | * @sb: super block |
2261 | * |
2262 | * Return Value: On success, 0 is returned. On errors, one of the following |
2263 | * negative error code is returned. |
2264 | * |
2265 | * %-EROFS - Read only filesystem. |
2266 | * |
2267 | * %-EIO - I/O error |
2268 | * |
2269 | * %-ENOSPC - No space left on device (only in a panic state). |
2270 | * |
2271 | * %-ERESTARTSYS - Interrupted. |
2272 | * |
2273 | * %-ENOMEM - Insufficient memory available. |
2274 | */ |
2275 | int nilfs_construct_segment(struct super_block *sb) |
2276 | { |
2277 | struct the_nilfs *nilfs = sb->s_fs_info; |
2278 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
2279 | struct nilfs_transaction_info *ti; |
2280 | |
2281 | if (sb_rdonly(sb) || unlikely(!sci)) |
2282 | return -EROFS; |
2283 | |
2284 | /* A call inside transactions causes a deadlock. */ |
2285 | BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); |
2286 | |
2287 | return nilfs_segctor_sync(sci); |
2288 | } |
2289 | |
2290 | /** |
2291 | * nilfs_construct_dsync_segment - construct a data-only logical segment |
2292 | * @sb: super block |
2293 | * @inode: inode whose data blocks should be written out |
2294 | * @start: start byte offset |
2295 | * @end: end byte offset (inclusive) |
2296 | * |
2297 | * Return Value: On success, 0 is returned. On errors, one of the following |
2298 | * negative error code is returned. |
2299 | * |
2300 | * %-EROFS - Read only filesystem. |
2301 | * |
2302 | * %-EIO - I/O error |
2303 | * |
2304 | * %-ENOSPC - No space left on device (only in a panic state). |
2305 | * |
2306 | * %-ERESTARTSYS - Interrupted. |
2307 | * |
2308 | * %-ENOMEM - Insufficient memory available. |
2309 | */ |
2310 | int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, |
2311 | loff_t start, loff_t end) |
2312 | { |
2313 | struct the_nilfs *nilfs = sb->s_fs_info; |
2314 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
2315 | struct nilfs_inode_info *ii; |
2316 | struct nilfs_transaction_info ti; |
2317 | int err = 0; |
2318 | |
2319 | if (sb_rdonly(sb) || unlikely(!sci)) |
2320 | return -EROFS; |
2321 | |
2322 | nilfs_transaction_lock(sb, &ti, 0); |
2323 | |
2324 | ii = NILFS_I(inode); |
2325 | if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) || |
2326 | nilfs_test_opt(nilfs, STRICT_ORDER) || |
2327 | test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || |
2328 | nilfs_discontinued(nilfs)) { |
2329 | nilfs_transaction_unlock(sb); |
2330 | err = nilfs_segctor_sync(sci); |
2331 | return err; |
2332 | } |
2333 | |
2334 | spin_lock(&nilfs->ns_inode_lock); |
2335 | if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && |
2336 | !test_bit(NILFS_I_BUSY, &ii->i_state)) { |
2337 | spin_unlock(&nilfs->ns_inode_lock); |
2338 | nilfs_transaction_unlock(sb); |
2339 | return 0; |
2340 | } |
2341 | spin_unlock(&nilfs->ns_inode_lock); |
2342 | sci->sc_dsync_inode = ii; |
2343 | sci->sc_dsync_start = start; |
2344 | sci->sc_dsync_end = end; |
2345 | |
2346 | err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); |
2347 | if (!err) |
2348 | nilfs->ns_flushed_device = 0; |
2349 | |
2350 | nilfs_transaction_unlock(sb); |
2351 | return err; |
2352 | } |
2353 | |
2354 | #define FLUSH_FILE_BIT (0x1) /* data file only */ |
2355 | #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */ |
2356 | |
2357 | /** |
2358 | * nilfs_segctor_accept - record accepted sequence count of log-write requests |
2359 | * @sci: segment constructor object |
2360 | */ |
2361 | static void nilfs_segctor_accept(struct nilfs_sc_info *sci) |
2362 | { |
2363 | spin_lock(&sci->sc_state_lock); |
2364 | sci->sc_seq_accepted = sci->sc_seq_request; |
2365 | spin_unlock(&sci->sc_state_lock); |
2366 | del_timer_sync(&sci->sc_timer); |
2367 | } |
2368 | |
2369 | /** |
2370 | * nilfs_segctor_notify - notify the result of request to caller threads |
2371 | * @sci: segment constructor object |
2372 | * @mode: mode of log forming |
2373 | * @err: error code to be notified |
2374 | */ |
2375 | static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) |
2376 | { |
2377 | /* Clear requests (even when the construction failed) */ |
2378 | spin_lock(&sci->sc_state_lock); |
2379 | |
2380 | if (mode == SC_LSEG_SR) { |
2381 | sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; |
2382 | sci->sc_seq_done = sci->sc_seq_accepted; |
2383 | nilfs_segctor_wakeup(sci, err); |
2384 | sci->sc_flush_request = 0; |
2385 | } else { |
2386 | if (mode == SC_FLUSH_FILE) |
2387 | sci->sc_flush_request &= ~FLUSH_FILE_BIT; |
2388 | else if (mode == SC_FLUSH_DAT) |
2389 | sci->sc_flush_request &= ~FLUSH_DAT_BIT; |
2390 | |
2391 | /* re-enable timer if checkpoint creation was not done */ |
2392 | if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && |
2393 | time_before(jiffies, sci->sc_timer.expires)) |
2394 | add_timer(&sci->sc_timer); |
2395 | } |
2396 | spin_unlock(&sci->sc_state_lock); |
2397 | } |
2398 | |
2399 | /** |
2400 | * nilfs_segctor_construct - form logs and write them to disk |
2401 | * @sci: segment constructor object |
2402 | * @mode: mode of log forming |
2403 | */ |
2404 | static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) |
2405 | { |
2406 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
2407 | struct nilfs_super_block **sbp; |
2408 | int err = 0; |
2409 | |
2410 | nilfs_segctor_accept(sci); |
2411 | |
2412 | if (nilfs_discontinued(nilfs)) |
2413 | mode = SC_LSEG_SR; |
2414 | if (!nilfs_segctor_confirm(sci)) |
2415 | err = nilfs_segctor_do_construct(sci, mode); |
2416 | |
2417 | if (likely(!err)) { |
2418 | if (mode != SC_FLUSH_DAT) |
2419 | atomic_set(&nilfs->ns_ndirtyblks, 0); |
2420 | if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && |
2421 | nilfs_discontinued(nilfs)) { |
2422 | down_write(&nilfs->ns_sem); |
2423 | err = -EIO; |
2424 | sbp = nilfs_prepare_super(sci->sc_super, |
2425 | nilfs_sb_will_flip(nilfs)); |
2426 | if (likely(sbp)) { |
2427 | nilfs_set_log_cursor(sbp[0], nilfs); |
2428 | err = nilfs_commit_super(sci->sc_super, |
2429 | NILFS_SB_COMMIT); |
2430 | } |
2431 | up_write(&nilfs->ns_sem); |
2432 | } |
2433 | } |
2434 | |
2435 | nilfs_segctor_notify(sci, mode, err); |
2436 | return err; |
2437 | } |
2438 | |
2439 | static void nilfs_construction_timeout(struct timer_list *t) |
2440 | { |
2441 | struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer); |
2442 | |
2443 | wake_up_process(sci->sc_timer_task); |
2444 | } |
2445 | |
2446 | static void |
2447 | nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) |
2448 | { |
2449 | struct nilfs_inode_info *ii, *n; |
2450 | |
2451 | list_for_each_entry_safe(ii, n, head, i_dirty) { |
2452 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) |
2453 | continue; |
2454 | list_del_init(&ii->i_dirty); |
2455 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); |
2456 | nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); |
2457 | iput(&ii->vfs_inode); |
2458 | } |
2459 | } |
2460 | |
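/**
 * nilfs_clean_segments - write GC logs and free reclaimable segments
 * @sb: super block instance
 * @argv: argument vectors handed down from the clean-segments ioctl
 * @kbufs: buffers holding the payloads described by @argv
 *
 * Repeats segment construction until the logs containing the relocated
 * GC blocks are written, then issues discard requests for the freed
 * segments if the DISCARD option is set, disabling the option when a
 * discard request fails.
 */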
2461 | int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, |
2462 | void **kbufs) |
2463 | { |
2464 | struct the_nilfs *nilfs = sb->s_fs_info; |
2465 | struct nilfs_sc_info *sci = nilfs->ns_writer; |
2466 | struct nilfs_transaction_info ti; |
2467 | int err; |
2468 | |
2469 | if (unlikely(!sci)) |
2470 | return -EROFS; |
2471 | |
2472 | nilfs_transaction_lock(sb, &ti, 1); |
2473 | |
2474 | err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat); |
2475 | if (unlikely(err)) |
2476 | goto out_unlock; |
2477 | |
2478 | err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); |
2479 | if (unlikely(err)) { |
2480 | nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat); |
2481 | goto out_unlock; |
2482 | } |
2483 | |
2484 | sci->sc_freesegs = kbufs[4]; |
2485 | sci->sc_nfreesegs = argv[4].v_nmembs; |
2486 | list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); |
2487 | |
2488 | for (;;) { |
2489 | err = nilfs_segctor_construct(sci, SC_LSEG_SR); |
2490 | nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); |
2491 | |
2492 | if (likely(!err)) |
2493 | break; |
2494 | |
2495 | nilfs_warn(sb, "error %d cleaning segments", err); |
2496 | set_current_state(TASK_INTERRUPTIBLE); |
2497 | schedule_timeout(sci->sc_interval); |
2498 | } |
2499 | if (nilfs_test_opt(nilfs, DISCARD)) { |
2500 | int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, |
2501 | sci->sc_nfreesegs); |
2502 | if (ret) { |
2503 | nilfs_warn(sb, |
2504 | "error %d on discard request, turning discards off for the device", |
2505 | ret); |
2506 | nilfs_clear_opt(nilfs, DISCARD); |
2507 | } |
2508 | } |
2509 | |
2510 | out_unlock: |
2511 | sci->sc_freesegs = NULL; |
2512 | sci->sc_nfreesegs = 0; |
2513 | nilfs_mdt_clear_shadow_map(nilfs->ns_dat); |
2514 | nilfs_transaction_unlock(sb); |
2515 | return err; |
2516 | } |
2517 | |
2518 | static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode) |
2519 | { |
2520 | struct nilfs_transaction_info ti; |
2521 | |
2522 | nilfs_transaction_lock(sci->sc_super, &ti, 0); |
2523 | nilfs_segctor_construct(sci, mode); |
2524 | |
2525 | /* |
2526 | * Unclosed segment should be retried. We do this using sc_timer. |
2527 | * Timeout of sc_timer will invoke complete construction which leads |
2528 | * to close the current logical segment. |
2529 | */ |
2530 | if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) |
2531 | nilfs_segctor_start_timer(sci); |
2532 | |
2533 | nilfs_transaction_unlock(sci->sc_super); |
2534 | } |
2535 | |
2536 | static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci) |
2537 | { |
2538 | int mode = 0; |
2539 | |
2540 | spin_lock(&sci->sc_state_lock); |
2541 | mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ? |
2542 | SC_FLUSH_DAT : SC_FLUSH_FILE; |
2543 | spin_unlock(&sci->sc_state_lock); |
2544 | |
2545 | if (mode) { |
2546 | nilfs_segctor_do_construct(sci, mode); |
2547 | |
2548 | spin_lock(&sci->sc_state_lock); |
2549 | sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ? |
2550 | ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT; |
2551 | spin_unlock(&sci->sc_state_lock); |
2552 | } |
2553 | clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); |
2554 | } |
2555 | |
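/**
 * nilfs_segctor_flush_mode - choose construction mode for pending flushes
 * @sci: segment constructor object
 *
 * Returns SC_FLUSH_FILE or SC_FLUSH_DAT when only the corresponding
 * flush requests are pending and no long-unclosed logical segment
 * requires a checkpoint; otherwise returns SC_LSEG_SR to request a full
 * construction.
 */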
2556 | static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci) |
2557 | { |
2558 | if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || |
2559 | time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) { |
2560 | if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT)) |
2561 | return SC_FLUSH_FILE; |
2562 | else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT)) |
2563 | return SC_FLUSH_DAT; |
2564 | } |
2565 | return SC_LSEG_SR; |
2566 | } |
2567 | |
2568 | /** |
2569 | * nilfs_segctor_thread - main loop of the segment constructor thread. |
2570 | * @arg: pointer to a struct nilfs_sc_info. |
2571 | * |
2572 | * nilfs_segctor_thread() initializes a timer and serves as a daemon |
2573 | * to execute segment constructions. |
2574 | */ |
2575 | static int nilfs_segctor_thread(void *arg) |
2576 | { |
2577 | struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; |
2578 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
2579 | int timeout = 0; |
2580 | |
2581 | sci->sc_timer_task = current; |
2582 | |
2583 | /* start sync. */ |
2584 | sci->sc_task = current; |
2585 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ |
2586 | nilfs_info(sci->sc_super, |
2587 | "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", |
2588 | sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); |
2589 | |
2590 | spin_lock(&sci->sc_state_lock); |
2591 | loop: |
2592 | for (;;) { |
2593 | int mode; |
2594 | |
2595 | if (sci->sc_state & NILFS_SEGCTOR_QUIT) |
2596 | goto end_thread; |
2597 | |
2598 | if (timeout || sci->sc_seq_request != sci->sc_seq_done) |
2599 | mode = SC_LSEG_SR; |
2600 | else if (sci->sc_flush_request) |
2601 | mode = nilfs_segctor_flush_mode(sci); |
2602 | else |
2603 | break; |
2604 | |
2605 | spin_unlock(&sci->sc_state_lock); |
2606 | nilfs_segctor_thread_construct(sci, mode); |
2607 | spin_lock(&sci->sc_state_lock); |
2608 | timeout = 0; |
2609 | } |
2610 | |
2611 | |
2612 | if (freezing(current)) { |
2613 | spin_unlock(&sci->sc_state_lock); |
2614 | try_to_freeze(); |
2615 | spin_lock(&sci->sc_state_lock); |
2616 | } else { |
2617 | DEFINE_WAIT(wait); |
2618 | int should_sleep = 1; |
2619 | |
2620 | prepare_to_wait(&sci->sc_wait_daemon, &wait, |
2621 | TASK_INTERRUPTIBLE); |
2622 | |
2623 | if (sci->sc_seq_request != sci->sc_seq_done) |
2624 | should_sleep = 0; |
2625 | else if (sci->sc_flush_request) |
2626 | should_sleep = 0; |
2627 | else if (sci->sc_state & NILFS_SEGCTOR_COMMIT) |
2628 | should_sleep = time_before(jiffies, |
2629 | sci->sc_timer.expires); |
2630 | |
2631 | if (should_sleep) { |
2632 | spin_unlock(&sci->sc_state_lock); |
2633 | schedule(); |
2634 | spin_lock(&sci->sc_state_lock); |
2635 | } |
2636 | finish_wait(&sci->sc_wait_daemon, &wait); |
2637 | timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && |
2638 | time_after_eq(jiffies, sci->sc_timer.expires)); |
2639 | |
2640 | if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) |
2641 | set_nilfs_discontinued(nilfs); |
2642 | } |
2643 | goto loop; |
2644 | |
2645 | end_thread: |
2646 | /* end sync. */ |
2647 | sci->sc_task = NULL; |
2648 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */ |
2649 | spin_unlock(&sci->sc_state_lock); |
2650 | return 0; |
2651 | } |
2652 | |
2653 | static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) |
2654 | { |
2655 | struct task_struct *t; |
2656 | |
2657 | t = kthread_run(nilfs_segctor_thread, sci, "segctord"); |
2658 | if (IS_ERR(t)) { |
2659 | int err = PTR_ERR(t); |
2660 | |
2661 | nilfs_err(sci->sc_super, "error %d creating segctord thread", |
2662 | err); |
2663 | return err; |
2664 | } |
2665 | wait_event(sci->sc_wait_task, sci->sc_task != NULL); |
2666 | return 0; |
2667 | } |
2668 | |
2669 | static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci) |
2670 | __acquires(&sci->sc_state_lock) |
2671 | __releases(&sci->sc_state_lock) |
2672 | { |
2673 | sci->sc_state |= NILFS_SEGCTOR_QUIT; |
2674 | |
2675 | while (sci->sc_task) { |
2676 | wake_up(&sci->sc_wait_daemon); |
2677 | spin_unlock(&sci->sc_state_lock); |
2678 | wait_event(sci->sc_wait_task, sci->sc_task == NULL); |
2679 | spin_lock(&sci->sc_state_lock); |
2680 | } |
2681 | } |
2682 | |
2683 | /* |
2684 | * Setup & clean-up functions |
2685 | */ |
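/**
 * nilfs_segctor_new - allocate and initialize a segment constructor object
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * Returns a new nilfs_sc_info object initialized with default timing
 * parameters (overridden by the values stored in the nilfs object), or
 * NULL if the allocation fails.
 */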
2686 | static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, |
2687 | struct nilfs_root *root) |
2688 | { |
2689 | struct the_nilfs *nilfs = sb->s_fs_info; |
2690 | struct nilfs_sc_info *sci; |
2691 | |
2692 | sci = kzalloc(sizeof(*sci), GFP_KERNEL); |
2693 | if (!sci) |
2694 | return NULL; |
2695 | |
2696 | sci->sc_super = sb; |
2697 | |
2698 | nilfs_get_root(root); |
2699 | sci->sc_root = root; |
2700 | |
2701 | init_waitqueue_head(&sci->sc_wait_request); |
2702 | init_waitqueue_head(&sci->sc_wait_daemon); |
2703 | init_waitqueue_head(&sci->sc_wait_task); |
2704 | spin_lock_init(&sci->sc_state_lock); |
2705 | INIT_LIST_HEAD(&sci->sc_dirty_files); |
2706 | INIT_LIST_HEAD(&sci->sc_segbufs); |
2707 | INIT_LIST_HEAD(&sci->sc_write_logs); |
2708 | INIT_LIST_HEAD(&sci->sc_gc_inodes); |
2709 | INIT_LIST_HEAD(&sci->sc_iput_queue); |
2710 | INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); |
2711 | timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0); |
2712 | |
2713 | sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; |
2714 | sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ; |
2715 | sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; |
2716 | |
2717 | if (nilfs->ns_interval) |
2718 | sci->sc_interval = HZ * nilfs->ns_interval; |
2719 | if (nilfs->ns_watermark) |
2720 | sci->sc_watermark = nilfs->ns_watermark; |
2721 | return sci; |
2722 | } |
2723 | |
2724 | static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) |
2725 | { |
2726 | int ret, retrycount = NILFS_SC_CLEANUP_RETRY; |
2727 | |
2728 | /* |
2729 | * The segctord thread was stopped and its timer was removed. |
2730 | * But some tasks remain. |
2731 | */ |
2732 | do { |
2733 | struct nilfs_transaction_info ti; |
2734 | |
2735 | nilfs_transaction_lock(sci->sc_super, &ti, 0); |
2736 | ret = nilfs_segctor_construct(sci, SC_LSEG_SR); |
2737 | nilfs_transaction_unlock(sci->sc_super); |
2738 | |
2739 | flush_work(&sci->sc_iput_work); |
2740 | |
2741 | } while (ret && ret != -EROFS && retrycount-- > 0); |
2742 | } |
2743 | |
2744 | /** |
2745 | * nilfs_segctor_destroy - destroy the segment constructor. |
2746 | * @sci: nilfs_sc_info |
2747 | * |
2748 | * nilfs_segctor_destroy() kills the segctord thread and frees |
2749 | * the nilfs_sc_info struct. |
2750 | * Caller must hold the segment semaphore. |
2751 | */ |
2752 | static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) |
2753 | { |
2754 | struct the_nilfs *nilfs = sci->sc_super->s_fs_info; |
2755 | int flag; |
2756 | |
2757 | up_write(&nilfs->ns_segctor_sem); |
2758 | |
2759 | spin_lock(&sci->sc_state_lock); |
2760 | nilfs_segctor_kill_thread(sci); |
2761 | flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request |
2762 | || sci->sc_seq_request != sci->sc_seq_done); |
2763 | spin_unlock(&sci->sc_state_lock); |
2764 | |
2765 | if (flush_work(&sci->sc_iput_work)) |
2766 | flag = true; |
2767 | |
2768 | if (flag || !nilfs_segctor_confirm(sci)) |
2769 | nilfs_segctor_write_out(sci); |
2770 | |
2771 | if (!list_empty(&sci->sc_dirty_files)) { |
2772 | nilfs_warn(sci->sc_super, |
2773 | "disposed unprocessed dirty file(s) when stopping log writer"); |
2774 | nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); |
2775 | } |
2776 | |
2777 | if (!list_empty(&sci->sc_iput_queue)) { |
2778 | nilfs_warn(sci->sc_super, |
2779 | "disposed unprocessed inode(s) in iput queue when stopping log writer"); |
2780 | nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); |
2781 | } |
2782 | |
2783 | WARN_ON(!list_empty(&sci->sc_segbufs)); |
2784 | WARN_ON(!list_empty(&sci->sc_write_logs)); |
2785 | |
2786 | nilfs_put_root(sci->sc_root); |
2787 | |
2788 | down_write(&nilfs->ns_segctor_sem); |
2789 | |
2790 | timer_shutdown_sync(&sci->sc_timer); |
2791 | kfree(sci); |
2792 | } |
2793 | |
2794 | /** |
2795 | * nilfs_attach_log_writer - attach log writer |
2796 | * @sb: super block instance |
2797 | * @root: root object of the current filesystem tree |
2798 | * |
2799 | * This allocates a log writer object, initializes it, and starts the |
2800 | * log writer. |
2801 | * |
2802 | * Return Value: On success, 0 is returned. On error, one of the following |
2803 | * negative error code is returned. |
2804 | * |
2805 | * %-ENOMEM - Insufficient memory available. |
2806 | */ |
2807 | int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) |
2808 | { |
2809 | struct the_nilfs *nilfs = sb->s_fs_info; |
2810 | int err; |
2811 | |
2812 | if (nilfs->ns_writer) { |
2813 | /* |
2814 | * This happens if the filesystem is made read-only by |
2815 | * __nilfs_error or nilfs_remount and then remounted |
2816 | * read/write. In these cases, reuse the existing |
2817 | * writer. |
2818 | */ |
2819 | return 0; |
2820 | } |
2821 | |
2822 | nilfs->ns_writer = nilfs_segctor_new(sb, root); |
2823 | if (!nilfs->ns_writer) |
2824 | return -ENOMEM; |
2825 | |
2826 | inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); |
2827 | |
2828 | err = nilfs_segctor_start_thread(nilfs->ns_writer); |
2829 | if (unlikely(err)) |
2830 | nilfs_detach_log_writer(sb); |
2831 | |
2832 | return err; |
2833 | } |
2834 | |
2835 | /** |
2836 | * nilfs_detach_log_writer - destroy log writer |
2837 | * @sb: super block instance |
2838 | * |
2839 | * This kills log writer daemon, frees the log writer object, and |
2840 | * destroys list of dirty files. |
2841 | */ |
2842 | void nilfs_detach_log_writer(struct super_block *sb) |
2843 | { |
2844 | struct the_nilfs *nilfs = sb->s_fs_info; |
2845 | LIST_HEAD(garbage_list); |
2846 | |
2847 | down_write(&nilfs->ns_segctor_sem); |
2848 | if (nilfs->ns_writer) { |
2849 | nilfs_segctor_destroy(nilfs->ns_writer); |
2850 | nilfs->ns_writer = NULL; |
2851 | } |
2852 | set_nilfs_purging(nilfs); |
2853 | |
2854 | /* Force to free the list of dirty files */ |
2855 | spin_lock(&nilfs->ns_inode_lock); |
2856 | if (!list_empty(&nilfs->ns_dirty_files)) { |
2857 | list_splice_init(&nilfs->ns_dirty_files, &garbage_list); |
2858 | nilfs_warn(sb, |
2859 | "disposed unprocessed dirty file(s) when detaching log writer"); |
2860 | } |
2861 | spin_unlock(&nilfs->ns_inode_lock); |
2862 | up_write(&nilfs->ns_segctor_sem); |
2863 | |
2864 | nilfs_dispose_list(nilfs, &garbage_list, 1); |
2865 | clear_nilfs_purging(nilfs); |
2866 | } |
2867 | |