// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
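	/*
	 * If this temporary buffer shadows a live metadata buffer, the
	 * journal write is finished with it: clear BH_Shadow on the
	 * original buffer and wake up anyone waiting for the shadow to
	 * go away before taking write access again.
	 */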
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers. These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list. Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock. The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct folio *folio;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	folio = bh->b_folio;
	if (folio->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!folio_trylock(folio))
		goto nope;

	folio_get(folio);
	__brelse(bh);
	try_to_free_buffers(folio);
	folio_unlock(folio);
	folio_put(folio);
	return;

nope:
	__brelse(bh);
}

static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
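	/*
	 * Zero the checksum fields before checksumming so that verification
	 * can recompute the checksum over the block in the same state.
	 */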
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record. We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	struct timespec64 now;
	blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

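	/*
	 * With barriers and synchronous commits, the commit block must not
	 * reach stable storage before the blocks it commits, so order it
	 * behind them with a cache flush and write it with FUA.
	 */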
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		write_flags |= REQ_PREFLUSH | REQ_FUA;

	submit_bh(write_flags, bh);
	*cbh = bh;
	return 0;
}

/*
 * This function along with journal_submit_commit_record
 * allows writing the commit record asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh); /* One for getblk() */

	return ret;
}

/* Send all the data buffers related to an inode */
int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
		return 0;

	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
	return journal->j_submit_inode_data_buffers(jinode);

}
EXPORT_SYMBOL(jbd2_submit_inode_data);

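/* Wait for writeback of a single inode's data, preserving any error. */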
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
	    !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
		return 0;
	return filemap_fdatawait_range_keep_errors(
		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
		jinode->i_dirty_end);
}
EXPORT_SYMBOL(jbd2_wait_inode_data);

/*
 * Submit all the data buffers of inode associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently
 * operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* submit the inode data buffers. */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		if (journal->j_submit_inode_data_buffers) {
			err = journal->j_submit_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

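/*
 * Wait for writeback of a single inode's dirty range to complete, collecting
 * any error without clearing it from the mapping.
 */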
int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;

	return filemap_fdatawait_range_keep_errors(mapping,
						   jinode->i_dirty_start,
						   jinode->i_dirty_end);
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* wait for the inode data buffers writeout. */
		if (journal->j_finish_inode_data_buffers) {
			err = journal->j_finish_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				 &jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

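/*
 * Fold one journal buffer into the running crc32 used for the commit block
 * checksum (JBD2_FEATURE_COMPAT_CHECKSUM).
 */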
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	char *addr;
	__u32 checksum;

	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
	checksum = crc32_be(crc32_sum, addr, bh->b_size);
	kunmap_local(addr);

	return checksum;
}

static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
			    unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
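	/*
	 * The high half is written as (block >> 31) >> 1 rather than a
	 * single 32-bit shift, which keeps the expression well defined
	 * even if the block number were ever a 32-bit quantity.
	 */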
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

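/*
 * Compute the per-block tag checksum used by csum v2/v3: checksum the commit
 * sequence number followed by the block contents, seeded with the journal's
 * checksum seed.
 */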
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
	kunmap_local(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log. This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd2_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd2_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	write_lock(&journal->j_state_lock);
	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_fc_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		write_unlock(&journal->j_state_lock);
		schedule();
		write_lock(&journal->j_state_lock);
		finish_wait(&journal->j_fc_wait, &wait);
		/*
		 * TODO: by blocking fast commits here, we are increasing
		 * fsync() latency slightly. Strictly speaking, we don't need
		 * to block fast commits until the transaction enters T_FLUSH
		 * state. So an optimization is possible where we block new fast
		 * commits here and wait for existing ones to complete
		 * just before we enter T_FLUSH. That way, the existing fast
		 * commits and this full commit can proceed in parallel.
		 */
	}
	write_unlock(&journal->j_state_lock);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
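	/*
	 * A full commit supersedes any fast-commit blocks written for this
	 * transaction, so restart the fast-commit area from the beginning.
	 */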
	journal->j_fc_off = 0;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	// waits for any t_updates to finish
	jbd2_journal_wait_updates(journal);

	commit_transaction->t_state = T_SWITCH;

	J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding. These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved. This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 * We use journal->j_state_lock here to serialize processing of
	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			spin_lock(&jh->b_state_lock);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			spin_unlock(&jh->b_state_lock);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	write_unlock(&journal->j_state_lock);
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists. We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd2_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear revoked flag to reflect there are no revoked buffers
	 * in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	write_lock(&journal->j_state_lock);
	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up_all(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd2_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists. Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd2_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction! Now comes the tricky part: we need to write out
	 * metadata. Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT(bufs == 0);

			jbd2_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
		   buffer */

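		/*
		 * Bit 0 of the value returned by
		 * jbd2_journal_write_metadata_buffer() is set when the block
		 * data had to be escaped because it would otherwise look like
		 * a journal magic number; record that in the on-disk tag.
		 */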
		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

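		/* Flush the descriptor once it cannot hold another tag, the
		   16-byte UUID a tag may carry, and the checksum tail. */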
		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							       descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
			"on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}


	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < jbd2_journal_get_max_txn_bufs(journal))
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd2_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);

	jbd2_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	WARN_ON_ONCE(
		atomic_read(&commit_transaction->t_outstanding_credits) < 0);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd2_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;
		bool drop_ref;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now. If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * For a buffer which has been freed while still being
		 * journaled by a previous transaction, refile the buffer to
		 * BJ_Forget of the running transaction. If the just committed
		 * transaction contains "add to orphan" operation, we can
		 * completely invalidate the buffer now. We are rather
		 * thorough in that since the buffer may still be accessible
		 * when blocksize < pagesize and it is attached to the last
		 * partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers (i.e.
			 * journalled data) we need to unmap buffer and clear
			 * more bits. We also need to be careful about the check
			 * because the data page mapping can get cleared under
			 * our hands. Note that if mapping == NULL, we don't
			 * need to make buffer unmapped because the page is
			 * already detached from the mapping and buffers cannot
			 * get reused.
			 */
			mapping = READ_ONCE(bh->b_folio->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		drop_ref = __jbd2_journal_refile_buffer(jh);
		spin_unlock(&jh->b_state_lock);
		if (drop_ref)
			jbd2_journal_put_journal_head(jh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
			commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd2_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);
	if (journal->j_fc_cleanup_callback)
		journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
		   journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
	wake_up(&journal->j_fc_wait);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}