// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

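/*
 * Overview of the commit path implemented in this file: the journal
 * thread calls jbd2_journal_commit_transaction(), which locks down the
 * running transaction, writes out its data buffers, logs the metadata
 * buffers through descriptor blocks, writes the commit record, and
 * finally moves the transaction onto the checkpoint list.  The numbered
 * "commit phase" debug messages in that function mark these stages.
 */
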
/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}

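/*
 * Checksum the commit block and store the result in the commit header.
 * The checksum fields are zeroed first so the block is summed in a
 * well-defined state; this is a no-op unless the journal carries the
 * v2 or v3 checksum feature.
 */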
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0]	= cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}
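
/*
 * Note on ordering: when JBD2_BARRIER is set and the async-commit feature
 * is not, the commit block above is submitted with REQ_PREFLUSH | REQ_FUA,
 * so its completion implies the log blocks it covers are stable on media.
 */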

/*
 * This function along with journal_submit_commit_record
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * Write the filemap data using writepage() from the
 * address_space_operations.  We don't do block allocation here, even for
 * delalloc; we don't use writepages() because with delayed allocation we
 * may be doing block allocation in writepages().
 */
int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	/*
	 * Submit the inode data buffers. We use writepage instead of
	 * writepages, because writepages can do block allocation with
	 * delalloc. We need to write only allocated blocks here.
	 */
	return generic_writepages(mapping, &wbc);
}

/* Send all the data buffers related to an inode */
int jbd2_submit_inode_data(struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
		return 0;

	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
	return jbd2_journal_submit_inode_data_buffers(jinode);
}
EXPORT_SYMBOL(jbd2_submit_inode_data);

int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
		!jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
		return 0;
	return filemap_fdatawait_range_keep_errors(
		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
		jinode->i_dirty_end);
}
EXPORT_SYMBOL(jbd2_wait_inode_data);
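
/*
 * The two helpers above are exported so that filesystem code driving its
 * own commit sequence (ext4's fast-commit path, for example) can submit
 * and wait on an inode's data without running a full journal commit.
 */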

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* submit the inode data buffers. */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		if (journal->j_submit_inode_data_buffers) {
			err = journal->j_submit_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

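/*
 * Wait for writeback of an inode's dirty range to complete, keeping any
 * I/O error recorded on the mapping so it can be reported later rather
 * than being cleared by this wait.
 */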
int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;

	return filemap_fdatawait_range_keep_errors(mapping,
						   jinode->i_dirty_start,
						   jinode->i_dirty_end);
}

/*
 * Wait for data submitted for writeout and refile inodes to the proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/* wait for the inode data buffers writeout. */
		if (journal->j_finish_inode_data_buffers) {
			err = journal->j_finish_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		cond_resched();
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

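/*
 * Fold one buffer's contents into the transaction's running crc32; used
 * for the commit-block checksum when JBD2_FEATURE_COMPAT_CHECKSUM is set.
 */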
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

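/*
 * Store a block number in an on-disk journal tag.  The low 32 bits always
 * go in t_blocknr; with the 64-bit feature the high bits go in
 * t_blocknr_high.  The shift is written as (block >> 31) >> 1 so that it
 * stays well-defined even if the block type is only 32 bits wide on some
 * configuration.
 */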
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

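/*
 * Checksum a logged buffer and record the result in its journal tag: the
 * checksum is chained over the transaction sequence number and the buffer
 * contents.  csum3 tags store all 32 bits; older csum2 tags keep only the
 * low 16.
 */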
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd2_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd2_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	write_lock(&journal->j_state_lock);
	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_fc_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		write_unlock(&journal->j_state_lock);
		schedule();
		write_lock(&journal->j_state_lock);
		finish_wait(&journal->j_fc_wait, &wait);
		/*
		 * TODO: by blocking fast commits here, we are increasing
		 * fsync() latency slightly. Strictly speaking, we don't need
		 * to block fast commits until the transaction enters T_FLUSH
		 * state. So an optimization is possible: block new fast
		 * commits here and wait for existing ones to complete just
		 * before we enter T_FLUSH. That way, the existing fast
		 * commits and this full commit could proceed in parallel.
		 */
	}
	write_unlock(&journal->j_state_lock);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	journal->j_fc_off = 0;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	/* waits for any t_updates to finish */
	jbd2_journal_wait_updates(journal);

	commit_transaction->t_state = T_SWITCH;

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 * We use journal->j_state_lock here to serialize processing of
	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			spin_lock(&jh->b_state_lock);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			spin_unlock(&jh->b_state_lock);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	write_unlock(&journal->j_state_lock);
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd2_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked
	 * buffers in the next transaction which is going to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	write_lock(&journal->j_state_lock);
	/*
	 * Reserved credits cannot be claimed anymore, free them
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up_all(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd2_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd2_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd2_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get current oldest transaction in the log before we issue flush
	 * to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < jbd2_journal_get_max_txn_bufs(journal))
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd2_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd2_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	WARN_ON_ONCE(
		atomic_read(&commit_transaction->t_outstanding_credits) < 0);

	/*
	 * Now disk caches for filesystem device are flushed so we are safe to
	 * erase checkpointed transactions from the log by updating journal
	 * superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd2_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;
		bool drop_ref;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * If a buffer has been freed while still being journaled by a
		 * previous transaction, refile it to BJ_Forget of the running
		 * transaction. If the just-committed transaction contains an
		 * "add to orphan" operation, we can completely invalidate the
		 * buffer now. We are rather thorough in that case, since the
		 * buffer may still be accessible when blocksize < pagesize
		 * and it is attached to the last partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers (i.e.
			 * journalled data) we need to unmap buffer and clear
			 * more bits. We also need to be careful about the check
			 * because the data page mapping can get cleared under
			 * our hands. Note that if mapping == NULL, we don't
			 * need to make buffer unmapped because the page is
			 * already detached from the mapping and buffers cannot
			 * get reused.
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		drop_ref = __jbd2_journal_refile_buffer(jh);
		spin_unlock(&jh->b_state_lock);
		if (drop_ref)
			jbd2_journal_put_journal_head(jh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/*
	 * Add the transaction to the checkpoint list.
	 * __journal_remove_checkpoint() cannot destroy the transaction
	 * under us because it is not marked as T_FINISHED yet.
	 */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd2_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);
	if (journal->j_fc_cleanup_callback)
		journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
	wake_up(&journal->j_fc_wait);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}
1201