// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}
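
/*
 * A rough sketch (assumed, not verbatim) of the other side of the shadow
 * protocol: do_get_write_access() in transaction.c waits for the shadow
 * bit to clear before handing the buffer to a new transaction, along the
 * lines of
 *
 *	wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
 *
 * The clear_bit_unlock()/smp_mb__after_atomic()/wake_up_bit() sequence
 * above is the standard unlock-and-wake pairing for such bit waits.
 */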

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}
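
/*
 * Note the reference juggling above: we take our own page reference
 * (get_page()) before dropping the buffer reference the caller gave us
 * (__brelse()), so the page cannot disappear while try_to_free_buffers()
 * runs; the final put_page() may then release it for good.
 */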

static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}
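
/*
 * The checksum fields are zeroed above before jbd2_chksum() runs so that
 * the checksum covers the commit block with those bytes as zero; a reader
 * verifying the block can zero the same fields and recompute.
 */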

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}
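
/*
 * Cache-flush semantics of the submission above, as I understand the
 * block layer flags: REQ_PREFLUSH forces previously completed journal
 * writes out of the device's volatile cache before the commit block is
 * written, and REQ_FUA makes the commit block itself durable before the
 * request completes.  Without JBD2_BARRIER, or with async commit, we
 * rely on a separate blkdev_issue_flush() later in the commit path.
 */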

/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here, even for delalloc: we avoid
 * writepages() because with delayed allocation it may end up doing
 * block allocation, and we only want to write already-allocated blocks.
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping,
		loff_t dirty_start, loff_t dirty_end)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = dirty_start,
		.range_end = dirty_end,
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
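
/*
 * nr_to_write above is a generous budget (twice the number of pages in
 * the mapping) rather than an exact count; presumably this keeps
 * writeback from stopping early while still bounding the work done in
 * one call.
 */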

/*
 * Submit all the data buffers of inodes associated with the transaction
 * to disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added
 * to our inode list. We use the JI_COMMIT_RUNNING flag to protect the
 * inode we currently operate on from being released while we write out
 * pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage instead
		 * of writepages because writepages can do block allocation
		 * with delalloc, and we need to write only already
		 * allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping, dirty_start,
				dirty_end);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
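
/*
 * A minimal sketch of the other side of the JI_COMMIT_RUNNING protocol
 * (assumed, not verbatim): code that wants to drop a jbd2_inode waits
 * for the flag to clear, roughly
 *
 *	wait_on_bit(&jinode->i_flags, __JI_COMMIT_RUNNING,
 *		    TASK_UNINTERRUPTIBLE);
 *
 * which pairs with the smp_mb()/wake_up_bit() above so the inode cannot
 * be released while we are writing its pages.
 */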

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		loff_t dirty_start = jinode->i_dirty_start;
		loff_t dirty_end = jinode->i_dirty_end;

		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait_range_keep_errors(
				jinode->i_vfs_inode->i_mapping, dirty_start,
				dirty_end);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}
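
/*
 * kmap_atomic() is needed above because the buffer's page may live in
 * highmem on 32-bit configurations; the checksum has to be computed
 * through a temporary kernel mapping of that page.
 */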

static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
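
/*
 * The on-disk tag splits a 64-bit block number across two 32-bit
 * big-endian fields: the low 32 bits always go in t_blocknr, and with
 * the 64bit feature the high 32 bits go in t_blocknr_high.  The shift
 * is written as (block >> 31) >> 1 rather than block >> 32, presumably
 * to stay well-defined and warning-free if the operand type were ever
 * 32 bits wide.  For example, block 0x123456789 is stored as
 * t_blocknr = 0x23456789 and t_blocknr_high = 0x1.
 */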

static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
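
/*
 * Note the width difference above: with the csum3 feature the full
 * 32-bit checksum is stored, while the older csum2 format truncates it
 * into the 16-bit t_checksum field, keeping only the low 16 bits.
 */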

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
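
	/*
	 * The loop above is an open-coded wait_event(): we cannot use the
	 * helper directly because t_handle_lock and j_state_lock must be
	 * dropped across schedule() and retaken afterwards.  Re-testing
	 * t_updates after prepare_to_wait() closes the race with a handle
	 * completing between the first check and the sleep.
	 */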

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it
	 * potentially frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked
	 * buffers in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore; free them.
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
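
	/*
	 * A sketch of the descriptor block layout produced by the loop
	 * above (per the tag-writing code earlier in this file):
	 *
	 *	+---------------------------+
	 *	| journal_header_t          |
	 *	| tag 0 | 16-byte uuid      |  only the first tag carries the uuid
	 *	| tag 1 | tag 2 | ...       |  later tags set JBD2_FLAG_SAME_UUID
	 *	| last tag (FLAG_LAST_TAG)  |
	 *	| block tail checksum       |  only with csum v2/v3 (csum_size)
	 *	+---------------------------+
	 *
	 * which is why the "descriptor full" test reserves tag_bytes + 16 +
	 * csum_size bytes of remaining space.
	 */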

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled
		 * by a previous transaction is refiled to BJ_Forget of the
		 * running transaction. If the just-committed transaction
		 * contains an "add to orphan" operation, we can completely
		 * invalidate the buffer now. We are rather thorough in that,
		 * since the buffer may still be accessible when blocksize <
		 * pagesize and it is attached to the last partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers (i.e.
			 * journalled data) we need to unmap buffer and clear
			 * more bits. We also need to be careful about the check
			 * because the data page mapping can get cleared under
			 * our hands. Note that if mapping == NULL, we don't
			 * need to make buffer unmapped because the page is
			 * already detached from the mapping and buffers cannot
			 * get reused.
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * A buffer on the BJ_Forget list that is not jbddirty
			 * has been freed by this transaction and hence could
			 * not have been reallocated until this transaction
			 * committed. *BUT* it could be reallocated once we
			 * have written all the data to disk and before we
			 * process the buffer on the BJ_Forget list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list
	 * __journal_remove_checkpoint() can not destroy transaction
	 * under us because it is not marked as T_FINISHED yet */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time; in effect
	 * this is an exponentially weighted moving average with weight 1/4
	 * on the new sample: new_avg = (commit_time + 3 * old_avg) / 4.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}