// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
        J_ASSERT(!transaction_cache);
        transaction_cache = kmem_cache_create("jbd2_transaction_s",
                                              sizeof(transaction_t),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                              NULL);
        if (!transaction_cache) {
                pr_emerg("JBD2: failed to create transaction cache\n");
                return -ENOMEM;
        }
        return 0;
}

void jbd2_journal_destroy_transaction_cache(void)
{
        kmem_cache_destroy(transaction_cache);
        transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
                return;
        kmem_cache_free(transaction_cache, transaction);
}

/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
        int tag_space = journal->j_blocksize - sizeof(journal_header_t);
        int tags_per_block;

        /* Subtract UUID */
        tag_space -= 16;
        if (jbd2_journal_has_csum_v2or3(journal))
                tag_space -= sizeof(struct jbd2_journal_block_tail);
        /* Commit code leaves a slack space of 16 bytes at the end of block */
        tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
        /*
         * Revoke descriptors are accounted separately so we need to reserve
         * space for commit block and normal transaction descriptor blocks.
         */
        return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
                                tags_per_block);
}
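
/*
 * Worked example (illustrative numbers only, assuming a 4096-byte journal
 * block, checksum v2/v3 enabled and 16-byte block tags):
 *
 *	tag_space      = 4096 - 12 (journal_header_t) - 16 (UUID)
 *	                      - 4 (jbd2_journal_block_tail) = 4064
 *	tags_per_block = (4064 - 16) / 16 = 253
 *
 * so with j_max_transaction_buffers = 4096 the function above would
 * return 1 + DIV_ROUND_UP(4096, 253) = 18 blocks per transaction.
 */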

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction: set it up in the RUNNING state
 * and add it to the current journal (which should not have an existing
 * running transaction: we only make a new transaction once we have
 * started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static void jbd2_get_transaction(journal_t *journal,
                                 transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits,
                   jbd2_descriptor_blocks_per_trans(journal) +
                   atomic_read(&journal->j_reserved_credits));
        atomic_set(&transaction->t_outstanding_revokes, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;
        transaction->t_requested = 0;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */
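
/*
 * A typical handle lifecycle, as seen from a filesystem (illustrative
 * sketch only; error handling trimmed and the buffer_head "bh" assumed
 * to have been obtained elsewhere):
 *
 *	handle_t *handle = jbd2_journal_start(journal, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify the buffer contents ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 *
 * Everything between start and stop forms one atomic update from the
 * journal's point of view.
 */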

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * t_max_wait is carefully updated here with use of atomic compare exchange.
 * Note that there could be multiple threads trying to do this simultaneously
 * hence using cmpxchg to avoid any use of locks in this case.
 * With this t_max_wait can be updated w/o enabling jbd2_journal_enable_debug.
 */
static inline void update_t_max_wait(transaction_t *transaction,
                                     unsigned long ts)
{
        unsigned long oldts, newts;

        if (time_after(transaction->t_start, ts)) {
                newts = jbd2_time_diff(ts, transaction->t_start);
                oldts = READ_ONCE(transaction->t_max_wait);
                while (oldts < newts)
                        oldts = cmpxchg(&transaction->t_max_wait, oldts, newts);
        }
}

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);
        int need_to_start;
        tid_t tid = journal->j_running_transaction->t_tid;

        prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
                                  TASK_UNINTERRUPTIBLE);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        jbd2_might_wait_for_commit(journal);
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);

        if (WARN_ON(!journal->j_running_transaction ||
                    journal->j_running_transaction->t_state != T_SWITCH)) {
                read_unlock(&journal->j_state_lock);
                return;
        }
        prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
                                  TASK_UNINTERRUPTIBLE);
        read_unlock(&journal->j_state_lock);
        /*
         * We don't call jbd2_might_wait_for_commit() here as there's no
         * waiting for outstanding handles happening anymore in T_SWITCH state
         * and handling of reserved handles actually relies on that for
         * correctness.
         */
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
        atomic_sub(blocks, &journal->j_reserved_credits);
        wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction. Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 *
 * Note: because j_state_lock may be dropped depending on the return
 * value, we need to fake out sparse so it doesn't complain about a
 * locking imbalance. Callers of add_transaction_credits will need to
 * make a similar accommodation.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
                                   int rsv_blocks)
        __must_hold(&journal->j_state_lock)
{
        transaction_t *t = journal->j_running_transaction;
        int needed;
        int total = blocks + rsv_blocks;

        /*
         * If the current transaction is locked down for commit, wait
         * for the lock to be released.
         */
        if (t->t_state != T_RUNNING) {
                WARN_ON_ONCE(t->t_state >= T_FLUSH);
                wait_transaction_locked(journal);
                __acquire(&journal->j_state_lock); /* fake out sparse */
                return 1;
        }

        /*
         * If there is not enough space left in the log to write all
         * potential buffers requested by this operation, we need to
         * stall pending a log checkpoint to free some more log space.
         */
        needed = atomic_add_return(total, &t->t_outstanding_credits);
        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large,
                 * then start to commit it: we can then go back and
                 * attach this handle to a new transaction.
                 */
                atomic_sub(total, &t->t_outstanding_credits);

                /*
                 * Is the number of reserved credits in the current transaction
                 * too big to fit this handle? Wait until reserved credits are
                 * freed.
                 */
                if (atomic_read(&journal->j_reserved_credits) + total >
                    journal->j_max_transaction_buffers) {
                        read_unlock(&journal->j_state_lock);
                        jbd2_might_wait_for_commit(journal);
                        wait_event(journal->j_wait_reserved,
                                   atomic_read(&journal->j_reserved_credits) + total <=
                                   journal->j_max_transaction_buffers);
                        __acquire(&journal->j_state_lock); /* fake out sparse */
                        return 1;
                }

                wait_transaction_locked(journal);
                __acquire(&journal->j_state_lock); /* fake out sparse */
                return 1;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         */
        if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                write_lock(&journal->j_state_lock);
                if (jbd2_log_space_left(journal) <
                    journal->j_max_transaction_buffers)
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                __acquire(&journal->j_state_lock); /* fake out sparse */
                return 1;
        }

        /* No reservation? We are done... */
        if (!rsv_blocks)
                return 0;

        needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
        /* We allow at most half of a transaction to be reserved */
        if (needed > journal->j_max_transaction_buffers / 2) {
                sub_reserved_credits(journal, rsv_blocks);
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                wait_event(journal->j_wait_reserved,
                           atomic_read(&journal->j_reserved_credits) + rsv_blocks
                           <= journal->j_max_transaction_buffers / 2);
                __acquire(&journal->j_state_lock); /* fake out sparse */
                return 1;
        }
        return 0;
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t *transaction, *new_transaction = NULL;
        int blocks = handle->h_total_credits;
        int rsv_blocks = 0;
        unsigned long ts = jiffies;

        if (handle->h_rsv_handle)
                rsv_blocks = handle->h_rsv_handle->h_total_credits;

        /*
         * Limit the number of reserved credits to 1/2 of maximum transaction
         * size and limit the number of total credits to not exceed maximum
         * transaction size per operation.
         */
        if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
            (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
                printk(KERN_ERR "JBD2: %s wants too many credits "
                       "credits:%d rsv_credits:%d max:%d\n",
                       current->comm, blocks, rsv_blocks,
                       journal->j_max_transaction_buffers);
                WARN_ON(1);
                return -ENOSPC;
        }

alloc_transaction:
        /*
         * This check is racy but it is just an optimization of allocating a
         * new transaction early if there is a high chance we'll need it. If
         * we guess wrong, we'll retry or free the unused transaction.
         */
        if (!data_race(journal->j_running_transaction)) {
                /*
                 * If __GFP_FS is not present, then we may be being called from
                 * inside the fs writeback layer, so we MUST NOT fail.
                 */
                if ((gfp_mask & __GFP_FS) == 0)
                        gfp_mask |= __GFP_NOFAIL;
                new_transaction = kmem_cache_zalloc(transaction_cache,
                                                    gfp_mask);
                if (!new_transaction)
                        return -ENOMEM;
        }

        jbd2_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /*
         * Wait on the journal's transaction barrier if necessary. Specifically
         * we allow reserved handles to proceed because otherwise commit could
         * deadlock on page writeback not being able to complete.
         */
        if (!handle->h_reserved && journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                           journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction &&
                    (handle->h_reserved || !journal->j_barrier_count)) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        if (!handle->h_reserved) {
                /* We may have dropped j_state_lock - restart in that case */
                if (add_transaction_credits(journal, blocks, rsv_blocks)) {
                        /*
                         * add_transaction_credits releases
                         * j_state_lock on a non-zero return
                         */
                        __release(&journal->j_state_lock);
                        goto repeat;
                }
        } else {
                /*
                 * We have handle reserved so we are allowed to join T_LOCKED
                 * transaction and we don't have to check for transaction size
                 * and journal space. But we still have to wait while running
                 * transaction is being switched to a committing one as it
                 * won't wait for any handles anymore.
                 */
                if (transaction->t_state == T_SWITCH) {
                        wait_transaction_switching(journal);
                        goto repeat;
                }
                sub_reserved_credits(journal, blocks);
                handle->h_reserved = 0;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        handle->h_requested_credits = blocks;
        handle->h_revoke_credits_requested = handle->h_revoke_credits;
        handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd2_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
                   handle, blocks,
                   atomic_read(&transaction->t_outstanding_credits),
                   jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);
        current->journal_info = handle;

        rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
        jbd2_journal_free_transaction(new_transaction);
        /*
         * Ensure that no allocations done while the transaction is open are
         * going to recurse back to the fs layer.
         */
        handle->saved_alloc_context = memalloc_nofs_save();
        return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        handle->h_total_credits = nblocks;
        handle->h_ref = 1;

        return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
                              int revoke_records, gfp_t gfp_mask,
                              unsigned int type, unsigned int line_no)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        nblocks += DIV_ROUND_UP(revoke_records,
                                journal->j_revoke_records_per_block);
        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        if (rsv_blocks) {
                handle_t *rsv_handle;

                rsv_handle = new_handle(rsv_blocks);
                if (!rsv_handle) {
                        jbd2_free_handle(handle);
                        return ERR_PTR(-ENOMEM);
                }
                rsv_handle->h_reserved = 1;
                rsv_handle->h_journal = journal;
                handle->h_rsv_handle = rsv_handle;
        }
        handle->h_revoke_credits = revoke_records;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                if (handle->h_rsv_handle)
                        jbd2_free_handle(handle->h_rsv_handle);
                jbd2_free_handle(handle);
                return ERR_PTR(err);
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, nblocks);

        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/**
 * jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose of the reserved one. A reserved handle
 * has to be converted to a normal handle using jbd2_journal_start_reserved()
 * before it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);

static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
        journal_t *journal = handle->h_journal;

        WARN_ON(!handle->h_reserved);
        sub_reserved_credits(journal, handle->h_total_credits);
        if (t)
                atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}

void jbd2_journal_free_reserved(handle_t *handle)
{
        journal_t *journal = handle->h_journal;

        /* Get j_state_lock to pin running transaction if it exists */
        read_lock(&journal->j_state_lock);
        __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
        read_unlock(&journal->j_state_lock);
        jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start handle that has been previously reserved with jbd2_journal_reserve().
 * This attaches @handle to the running transaction (or creates one if there's
 * no transaction running). Unlike jbd2_journal_start() this function cannot
 * block on journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or a frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
                                unsigned int line_no)
{
        journal_t *journal = handle->h_journal;
        int ret = -EIO;

        if (WARN_ON(!handle->h_reserved)) {
                /* Someone passed in normal handle? Just stop it. */
                jbd2_journal_stop(handle);
                return ret;
        }
        /*
         * The usefulness of mixing reserved and unreserved handles is
         * questionable. So far nobody seems to need it so just error out.
         */
        if (WARN_ON(current->journal_info)) {
                jbd2_journal_free_reserved(handle);
                return ret;
        }

        handle->h_journal = NULL;
        /*
         * GFP_NOFS is here because callers are likely from writeback or
         * similarly constrained call sites
         */
        ret = start_this_handle(journal, handle, GFP_NOFS);
        if (ret < 0) {
                handle->h_journal = journal;
                jbd2_journal_free_reserved(handle);
                return ret;
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, handle->h_total_credits);
        return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
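
/*
 * Illustrative flow for reserved handles (a sketch only, not lifted from
 * a particular filesystem): reserve credits up front where blocking is
 * acceptable, then start the handle later from a context that must not
 * wait on transaction commit:
 *
 *	handle = jbd2__journal_start(journal, nblocks, rsv_blocks,
 *				     revoke_records, GFP_NOFS, type, line_no);
 *	rsv_handle = handle->h_rsv_handle;
 *	handle->h_rsv_handle = NULL;	// keep rsv_handle past journal_stop
 *	jbd2_journal_stop(handle);
 *	...
 *	err = jbd2_journal_start_reserved(rsv_handle, type, line_no);
 *
 * On success rsv_handle is attached to the running transaction; on
 * failure it has already been freed.
 */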

/**
 * jbd2_journal_extend() - extend buffer credits.
 * @handle: handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int result;
        int wanted;

        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (transaction->t_state != T_RUNNING) {
                jbd2_debug(3, "denied handle %p %d blocks: "
                           "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        nblocks += DIV_ROUND_UP(
                        handle->h_revoke_credits_requested + revoke_records,
                        journal->j_revoke_records_per_block) -
                   DIV_ROUND_UP(
                        handle->h_revoke_credits_requested,
                        journal->j_revoke_records_per_block);
        wanted = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (wanted > journal->j_max_transaction_buffers) {
                jbd2_debug(3, "denied handle %p %d blocks: "
                           "transaction too large\n", handle, nblocks);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                goto error_out;
        }

        trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
                                 transaction->t_tid,
                                 handle->h_type, handle->h_line_no,
                                 handle->h_total_credits,
                                 nblocks);

        handle->h_total_credits += nblocks;
        handle->h_requested_credits += nblocks;
        handle->h_revoke_credits += revoke_records;
        handle->h_revoke_credits_requested += revoke_records;
        result = 0;

        jbd2_debug(3, "extended handle %p by %d\n", handle, nblocks);
error_out:
        read_unlock(&journal->j_state_lock);
        return result;
}

static void stop_this_handle(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int revokes;

        J_ASSERT(journal_current_handle() == handle);
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        current->journal_info = NULL;
        /*
         * Subtract necessary revoke descriptor blocks from handle credits. We
         * take care to account only for revoke descriptor blocks the
         * transaction will really need as large sequences of transactions with
         * small numbers of revokes are relatively common.
         */
        revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
        if (revokes) {
                int t_revokes, revoke_descriptors;
                int rr_per_blk = journal->j_revoke_records_per_block;

                WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
                             > handle->h_total_credits);
                t_revokes = atomic_add_return(revokes,
                                &transaction->t_outstanding_revokes);
                revoke_descriptors =
                        DIV_ROUND_UP(t_revokes, rr_per_blk) -
                        DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
                handle->h_total_credits -= revoke_descriptors;
        }
        atomic_sub(handle->h_total_credits,
                   &transaction->t_outstanding_credits);
        if (handle->h_rsv_handle)
                __jbd2_journal_unreserve_handle(handle->h_rsv_handle,
                                                transaction);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);

        rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
        /*
         * Scope of the GFP_NOFS context is over here and so we can restore the
         * original alloc context.
         */
        memalloc_nofs_restore(handle->saved_alloc_context);
}

/**
 * jbd2__journal_restart() - restart a handle.
 * @handle: handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve the reserved handle if one is attached to the
 * passed-in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
                          gfp_t gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        tid_t tid;
        int need_to_start;
        int ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;
        journal = transaction->t_journal;
        tid = transaction->t_tid;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        jbd2_debug(2, "restarting handle %p\n", handle);
        stop_this_handle(handle);
        handle->h_transaction = NULL;

        /*
         * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
         * get rid of pointless j_state_lock traffic like this.
         */
        read_lock(&journal->j_state_lock);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        handle->h_total_credits = nblocks +
                DIV_ROUND_UP(revoke_records,
                             journal->j_revoke_records_per_block);
        handle->h_revoke_credits = revoke_records;
        ret = start_this_handle(journal, handle, gfp_mask);
        trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
                                  ret ? 0 : handle->h_transaction->t_tid,
                                  handle->h_type, handle->h_line_no,
                                  handle->h_total_credits);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
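
/*
 * The extend/restart pattern described above, as a sketch of a typical
 * caller (names are illustrative):
 *
 *	if (jbd2_handle_buffer_credits(handle) < needed) {
 *		err = jbd2_journal_extend(handle, needed, revoke_records);
 *		if (err > 0)
 *			err = jbd2_journal_restart(handle, needed);
 *		if (err)
 *			goto fail;
 *	}
 *
 * A positive return from jbd2_journal_extend() means the running
 * transaction is full, so the handle is restarted against a new one;
 * the caller must be prepared for previously dirtied buffers to have
 * been committed in the meantime.
 */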

/*
 * Waits for any outstanding t_updates to finish.
 * This is called with write j_state_lock held.
 */
void jbd2_journal_wait_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        while (1) {
                /*
                 * Note that the running transaction can get freed under us if
                 * this transaction is getting committed in
                 * jbd2_journal_commit_transaction() ->
                 * jbd2_journal_free_transaction(). This can only happen when
                 * we release j_state_lock -> schedule() -> acquire
                 * j_state_lock. Hence we must re-read j_running_transaction
                 * after every j_state_lock release/acquire cycle, else we may
                 * end up using an already freed transaction.
                 */
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
}

/**
 * jbd2_journal_lock_updates() - establish a transaction barrier.
 * @journal: Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        jbd2_might_wait_for_commit(journal);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no reserved handles */
        if (atomic_read(&journal->j_reserved_credits)) {
                write_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_reserved,
                           atomic_read(&journal->j_reserved_credits) == 0);
                write_lock(&journal->j_state_lock);
        }

        /* Wait until there are no running t_updates */
        jbd2_journal_wait_updates(journal);

        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates()
         * calls to make sure that we serialise special journal-locked
         * operations too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * jbd2_journal_unlock_updates() - release barrier
 * @journal: Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up_all(&journal->j_wait_transaction_locked);
}
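
/*
 * Sketch of the barrier pairing (illustrative; e.g. a filesystem
 * quiescing the journal around an administrative operation):
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start, no updates are running ...
 *	jbd2_journal_unlock_updates(journal);
 *
 * The call may block until all existing handles complete; the j_barrier
 * mutex additionally serialises concurrent lock_updates callers against
 * each other.
 */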

static void warn_dirty_buffer(struct buffer_head *bh)
{
        printk(KERN_WARNING
               "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
        struct page *page;
        int offset;
        char *source;
        struct buffer_head *bh = jh2bh(jh);

        J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
        page = bh->b_page;
        offset = offset_in_page(bh->b_data);
        source = kmap_atomic(page);
        /* Fire data frozen trigger just before we copy the data */
        jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
        memcpy(jh->b_frozen_data, source + offset, bh->b_size);
        kunmap_atomic(source);

        /*
         * Now that the frozen data is saved off, we need to store any matching
         * triggers.
         */
        jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                    int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        unsigned long start_lock, time_lock;

        journal = transaction->t_journal;

        jbd2_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        start_lock = jiffies;
        lock_buffer(bh);
        spin_lock(&jh->b_state_lock);

        /* If it takes too long to lock the buffer, trace it */
        time_lock = jbd2_time_diff(start_lock, jiffies);
        if (time_lock > HZ/10)
                trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
                                             jiffies_to_msecs(time_lock));

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.) */

        if (buffer_dirty(bh) && jh->b_transaction) {
                warn_dirty_buffer(bh);
                /*
                 * We need to clean the dirty flag and we must do it under the
                 * buffer lock to be sure we don't race with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                /*
                 * The buffer is going to be added to BJ_Reserved list now and
                 * nothing guarantees jbd2_journal_dirty_metadata() will be
                 * ever called for it. So we need to set jbddirty bit here to
                 * make sure the buffer is dirtied and written out when the
                 * journaling machinery is done with it.
                 */
                set_buffer_jbddirty(bh);
        }

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                spin_unlock(&jh->b_state_lock);
                unlock_buffer(bh);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction) {
                unlock_buffer(bh);
                goto done;
        }

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If the buffer is not journaled right now, we need to make sure it
         * doesn't get written to disk before the caller actually commits the
         * new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                /*
                 * Make sure all stores to jh (b_modified, b_frozen_data) are
                 * visible before attaching it to the running transaction.
                 * Paired with barrier in jbd2_write_access_granted()
                 */
                smp_wmb();
                spin_lock(&journal->j_list_lock);
                if (test_clear_buffer_dirty(bh)) {
                        /*
                         * Execute buffer dirty clearing and jh->b_transaction
                         * assignment under journal->j_list_lock locked to
                         * prevent bh being removed from checkpoint list if
                         * the buffer is in an intermediate state (not dirty
                         * and jh->b_transaction is NULL).
                         */
                        JBUFFER_TRACE(jh, "Journalling dirty buffer");
                        set_buffer_jbddirty(bh);
                }
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
                unlock_buffer(bh);
                goto done;
        }
        unlock_buffer(bh);

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                goto attach_next;
        }

        JBUFFER_TRACE(jh, "owned by older transaction");
        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

        /*
         * There is one case we have to be very careful about.  If the
         * committing transaction is currently writing this buffer out to disk
         * and has NOT made a copy-out, then we cannot modify the buffer
         * contents at all right now.  The essence of copy-out is that it is
         * the extra copy, not the primary copy, which gets journaled.  If the
         * primary copy is already going to disk then we cannot do copy-out
         * here.
         */
        if (buffer_shadow(bh)) {
                JBUFFER_TRACE(jh, "on shadow: sleep");
                spin_unlock(&jh->b_state_lock);
                wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
                goto repeat;
        }

        /*
         * Only do the copy if the currently-owning transaction still needs it.
         * If buffer isn't on BJ_Metadata list, the committing transaction is
         * past that stage (here we use the fact that BH_Shadow is set under
         * bh_state lock together with refiling to BJ_Shadow list and at this
         * point we know the buffer doesn't have BH_Shadow set).
         *
         * Subtle point, though: if this is a get_undo_access, then we will be
         * relying on the frozen_data to contain the new value of the
         * committed_data record after the transaction, so we HAVE to force the
         * frozen_data copy in that case.
         */
        if (jh->b_jlist == BJ_Metadata || force_copy) {
                JBUFFER_TRACE(jh, "generate frozen data");
                if (!frozen_buffer) {
                        JBUFFER_TRACE(jh, "allocate memory for buffer");
                        spin_unlock(&jh->b_state_lock);
                        frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS | __GFP_NOFAIL);
                        goto repeat;
                }
                jh->b_frozen_data = frozen_buffer;
                frozen_buffer = NULL;
                jbd2_freeze_jh_data(jh);
        }
attach_next:
        /*
         * Make sure all stores to jh (b_modified, b_frozen_data) are visible
         * before attaching it to the running transaction. Paired with barrier
         * in jbd2_write_access_granted()
         */
        smp_wmb();
        jh->b_next_transaction = transaction;

done:
        spin_unlock(&jh->b_state_lock);

        /*
         * If we are about to journal a buffer, then any revoke pending on it
         * is no longer valid
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))	/* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
                                      bool undo)
{
        struct journal_head *jh;
        bool ret = false;

        /* Dirty buffers require special handling... */
        if (buffer_dirty(bh))
                return false;

        /*
         * RCU protects us from dereferencing freed pages. So the checks we do
         * are guaranteed not to oops. However the jh slab object can get freed
         * & reallocated while we work with it. So we have to be careful. When
         * we see jh attached to the running transaction, we know it must stay
         * so until the transaction is committed. Thus jh won't be freed and
         * will be attached to the same bh while we run. However it can happen
         * that jh gets freed, reallocated, and attached to the transaction
         * just after we get a pointer to it from bh. So we have to be careful
         * and recheck jh still belongs to our bh before we return success.
         */
        rcu_read_lock();
        if (!buffer_jbd(bh))
                goto out;
        /* This should be bh2jh() but that doesn't work with inline functions */
        jh = READ_ONCE(bh->b_private);
        if (!jh)
                goto out;
        /* For undo access buffer must have data copied */
        if (undo && !jh->b_committed_data)
                goto out;
        if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
            READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
                goto out;
        /*
         * There are two reasons for the barrier here:
         * 1) Make sure to fetch b_bh after we did previous checks so that we
         * detect when jh went through a free, realloc, attach-to-transaction
         * cycle while we were checking. Paired with implicit barrier in that
         * path.
         * 2) So that access to bh done after jbd2_write_access_granted()
         * doesn't get reordered and see inconsistent state of concurrent
         * do_get_write_access().
         */
        smp_mb();
        if (unlikely(jh->b_bh != bh))
                goto out;
        ret = true;
out:
        rcu_read_unlock();
        return ret;
}

/**
 * jbd2_journal_get_write_access() - notify intent to modify a buffer
 *				     for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh: bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh;
        int rc;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, false))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * jbd2_journal_get_create_access() - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd2_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        journal = transaction->t_journal;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        spin_lock(&jh->b_state_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                 jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                spin_lock(&journal->j_list_lock);
                jh->b_next_transaction = transaction;
                spin_unlock(&journal->j_list_lock);
        }
        spin_unlock(&jh->b_state_lock);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second
         * revoke, which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
out:
        jbd2_journal_put_journal_head(jh);
        return err;
}
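
/*
 * Sketch of the create-access path described above (illustrative only;
 * error handling trimmed):
 *
 *	bh = sb_getblk(sb, blocknr);	// brand-new, unread buffer
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, bh->b_size);	// fill with new data
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *
 * The buffer stays locked until it is fully populated, matching the
 * requirement spelled out before jbd2_journal_get_create_access().
 */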

/**
 * jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh;
        char *committed_data = NULL;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, true))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data)
                committed_data = jbd2_alloc(jh2bh(jh)->b_size,
                                            GFP_NOFS|__GFP_NOFAIL);

        spin_lock(&jh->b_state_lock);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        spin_unlock(&jh->b_state_lock);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        spin_unlock(&jh->b_state_lock);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
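
/*
 * Sketch of an undo-access user (illustrative, modelled on the bitmap
 * case mentioned above): before clearing bits in a block bitmap as part
 * of a delete, preserve the committed contents so the delete stays
 * rewindable until it commits:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		goto fail;
 *	... clear the relevant bits in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *
 * Allocation decisions made before the commit can then consult
 * jh->b_committed_data rather than the live bitmap.
 */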

/**
 * jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

        if (WARN_ON_ONCE(!jh))
                return;
        jh->b_triggers = type;
        jbd2_journal_put_journal_head(jh);
}

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}
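
/*
 * A minimal trigger definition, as a sketch (the callback names are
 * hypothetical; the signatures follow the t_frozen/t_abort calls above):
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *mapped_data,
 *			      size_t size)
 *	{
 *		// e.g. recompute a checksum in mapped_data just before
 *		// the buffer is copied out to the journal
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */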

/**
 * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh;
        int ret = 0;

        if (!buffer_jbd(bh))
                return -EUCLEAN;

        /*
         * We don't grab jh reference here since the buffer must be part
         * of the running transaction.
         */
        jh = bh2jh(bh);
        jbd2_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * This and the following assertions are unreliable since we may see jh
         * in inconsistent state unless we grab bh_state lock. But this is
         * crucial to catch bugs so let's do a reliable check until the
         * lockless handling is fully proven.
         */
        if (data_race(jh->b_transaction != transaction &&
            jh->b_next_transaction != transaction)) {
                spin_lock(&jh->b_state_lock);
                J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_next_transaction == transaction);
                spin_unlock(&jh->b_state_lock);
        }
        if (jh->b_modified == 1) {
                /* If it's in our transaction it must be in BJ_Metadata list. */
                if (data_race(jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata)) {
                        spin_lock(&jh->b_state_lock);
                        if (jh->b_transaction == transaction &&
                            jh->b_jlist != BJ_Metadata)
                                pr_err("JBD2: assertion failure: h_type=%u "
                                       "h_line_no=%u block_no=%llu jlist=%u\n",
                                       handle->h_type, handle->h_line_no,
                                       (unsigned long long) bh->b_blocknr,
                                       jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        spin_unlock(&jh->b_state_lock);
                }
                goto out;
        }

        journal = transaction->t_journal;
        spin_lock(&jh->b_state_lock);

        if (is_handle_aborted(handle)) {
                /*
                 * Check journal aborting with @jh->b_state_lock locked,
                 * since 'jh->b_transaction' could be replaced with
                 * 'jh->b_next_transaction' during old transaction
                 * committing if journal aborted, which may fail
                 * assertion on 'jh->b_frozen_data == NULL'.
                 */
                ret = -EROFS;
                goto out_unlock_bh;
        }

        if (jh->b_modified == 0) {
                /*
                 * The buffer got modified and is becoming part of the
                 * transaction. This needs to be done once per
                 * transaction -bzzz
                 */
                if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
                jh->b_modified = 1;
                handle->h_total_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
                        printk(KERN_ERR "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_running_transaction,
                               journal->j_running_transaction ?
                               journal->j_running_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(((jh->b_transaction !=
                               journal->j_committing_transaction)) ||
                             (jh->b_next_transaction != transaction))) {
                        printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
                               "bad jh for block %llu: "
                               "transaction (%p, %u), "
                               "jh->b_transaction (%p, %u), "
                               "jh->b_next_transaction (%p, %u), jlist %u\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               transaction, transaction->t_tid,
                               jh->b_transaction,
                               jh->b_transaction ?
                               jh->b_transaction->t_tid : 0,
                               jh->b_next_transaction,
                               jh->b_next_transaction ?
                               jh->b_next_transaction->t_tid : 0,
                               jh->b_jlist);
                        WARN_ON(1);
                        ret = -EINVAL;
                }
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        spin_unlock(&jh->b_state_lock);
out:
        JBUFFER_TRACE(jh, "exit");
        return ret;
}
1644
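/*
 * Illustrative sketch, not part of this file: the usual calling pattern
 * a filesystem follows around jbd2_journal_dirty_metadata(). The helper
 * name my_update_counter() and the block layout are hypothetical; only
 * the jbd2 calls are real. jbd2_journal_get_write_access() declares the
 * intent to modify the buffer (possibly freezing the old contents for a
 * committing transaction) and must precede the modification:
 *
 *	static int my_update_counter(handle_t *handle,
 *				     struct buffer_head *bh)
 *	{
 *		int err = jbd2_journal_get_write_access(handle, bh);
 *
 *		if (err)
 *			return err;
 *		le32_add_cpu((__le32 *)bh->b_data, 1);
 *		return jbd2_journal_dirty_metadata(handle, bh);
 *	}
 */
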
1645 /**
1646 * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1647 * @handle: transaction handle
1648 * @bh: bh to 'forget'
1649 *
1650 * We can only do the bforget if there are no commits pending against the
1651 * buffer. If the buffer is dirty in the current running transaction we
1652 * can safely unlink it.
1653 *
1654 * bh may not be a journalled buffer at all - it may be a non-JBD
1655 * buffer which came off the hashtable. Check for this.
1656 *
1657 * Decrements bh->b_count by one.
1658 *
1659 * Allow this call even if the handle has aborted --- it may be part of
1660 * the caller's cleanup after an abort.
1661 */
1662 int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
1663 {
1664 transaction_t *transaction = handle->h_transaction;
1665 journal_t *journal;
1666 struct journal_head *jh;
1667 int drop_reserve = 0;
1668 int err = 0;
1669 int was_modified = 0;
1670
1671 if (is_handle_aborted(handle))
1672 return -EROFS;
1673 journal = transaction->t_journal;
1674
1675 BUFFER_TRACE(bh, "entry");
1676
1677 jh = jbd2_journal_grab_journal_head(bh);
1678 if (!jh) {
1679 __bforget(bh);
1680 return 0;
1681 }
1682
1683 spin_lock(&jh->b_state_lock);
1684
1685 /* Critical error: attempting to delete a bitmap buffer, maybe?
1686 * Don't do any jbd operations, and return an error. */
1687 if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1688 "inconsistent data on disk")) {
1689 err = -EIO;
1690 goto drop;
1691 }
1692
1693 /* keep track of whether or not this transaction modified us */
1694 was_modified = jh->b_modified;
1695
1696 /*
1697 * The buffer is going away from the transaction, so we must
1698 * drop all references. -bzzz
1699 */
1700 jh->b_modified = 0;
1701
1702 if (jh->b_transaction == transaction) {
1703 J_ASSERT_JH(jh, !jh->b_frozen_data);
1704
1705 /* If we are forgetting a buffer which is already part
1706 * of this transaction, then we can just drop it from
1707 * the transaction immediately. */
1708 clear_buffer_dirty(bh);
1709 clear_buffer_jbddirty(bh);
1710
1711 JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1712
1713 /*
1714 * we only want to drop a reference if this transaction
1715 * modified the buffer
1716 */
1717 if (was_modified)
1718 drop_reserve = 1;
1719
1720 /*
1721 * We are no longer going to journal this buffer.
1722 * However, the commit of this transaction is still
1723 * important to the buffer: the delete that we are now
1724 * processing might obsolete an old log entry, so by
1725 * committing, we can satisfy the buffer's checkpoint.
1726 *
1727 * So, if we have a checkpoint on the buffer, we should
1728 * now refile the buffer on our BJ_Forget list so that
1729 * we know to remove the checkpoint after we commit.
1730 */
1731
1732 spin_lock(&journal->j_list_lock);
1733 if (jh->b_cp_transaction) {
1734 __jbd2_journal_temp_unlink_buffer(jh);
1735 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1736 } else {
1737 __jbd2_journal_unfile_buffer(jh);
1738 jbd2_journal_put_journal_head(jh);
1739 }
1740 spin_unlock(&journal->j_list_lock);
1741 } else if (jh->b_transaction) {
1742 J_ASSERT_JH(jh, (jh->b_transaction ==
1743 journal->j_committing_transaction));
1744 /* However, if the buffer is still owned by a prior
1745 * (committing) transaction, we can't drop it yet... */
1746 JBUFFER_TRACE(jh, "belongs to older transaction");
1747 /* ... but we CAN drop it from the new transaction by
1748 * marking the buffer as freed and setting j_next_transaction to
1749 * the new transaction, so that not only does the commit code
1750 * know it should clear dirty bits when it is done with the
1751 * buffer, but the buffer can also be checkpointed only
1752 * after the new transaction commits. */
1753
1754 set_buffer_freed(bh);
1755
1756 if (!jh->b_next_transaction) {
1757 spin_lock(&journal->j_list_lock);
1758 jh->b_next_transaction = transaction;
1759 spin_unlock(&journal->j_list_lock);
1760 } else {
1761 J_ASSERT(jh->b_next_transaction == transaction);
1762
1763 /*
1764 * only drop a reference if this transaction modified
1765 * the buffer
1766 */
1767 if (was_modified)
1768 drop_reserve = 1;
1769 }
1770 } else {
1771 /*
1772 * Finally, if the buffer does not belong to any
1773 * transaction, we can just drop it now if it has no
1774 * checkpoint.
1775 */
1776 spin_lock(&journal->j_list_lock);
1777 if (!jh->b_cp_transaction) {
1778 JBUFFER_TRACE(jh, "belongs to none transaction");
1779 spin_unlock(&journal->j_list_lock);
1780 goto drop;
1781 }
1782
1783 /*
1784 * Otherwise, if the buffer has been written to disk,
1785 * it is safe to remove the checkpoint and drop it.
1786 */
1787 if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
1788 spin_unlock(&journal->j_list_lock);
1789 goto drop;
1790 }
1791
1792 /*
1793 * The buffer has not yet been written to disk, so we
1794 * should attach it to the current transaction so that the
1795 * buffer can be checkpointed only after the current
1796 * transaction commits.
1797 */
1798 clear_buffer_dirty(bh);
1799 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1800 spin_unlock(&journal->j_list_lock);
1801 }
1802 drop:
1803 __brelse(bh);
1804 spin_unlock(&jh->b_state_lock);
1805 jbd2_journal_put_journal_head(jh);
1806 if (drop_reserve) {
1807 /* no need to reserve log space for this block -bzzz */
1808 handle->h_total_credits++;
1809 }
1810 return err;
1811 }
1812
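/*
 * Illustrative sketch, not part of this file: how a filesystem might
 * forget a metadata block it has just freed. sb_bread() stands in for
 * however the buffer was obtained and blocknr is hypothetical. Note
 * that jbd2_journal_forget() consumes the bh reference (it decrements
 * b_count), so no brelse() follows:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (bh)
 *		err = jbd2_journal_forget(handle, bh);
 */
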
1813 /**
1814 * jbd2_journal_stop() - complete a transaction
1815 * @handle: transaction to complete.
1816 *
1817 * All done for a particular handle.
1818 *
1819 * There is not much action needed here. We just return any remaining
1820 * buffer credits to the transaction and remove the handle. The only
1821 * complication is that we need to start a commit operation if the
1822 * filesystem is marked for synchronous update.
1823 *
1824 * jbd2_journal_stop itself will not usually return an error, but it may
1825 * do so in unusual circumstances. In particular, expect it to
1826 * return -EIO if a jbd2_journal_abort has been executed since the
1827 * transaction began.
1828 */
1829 int jbd2_journal_stop(handle_t *handle)
1830 {
1831 transaction_t *transaction = handle->h_transaction;
1832 journal_t *journal;
1833 int err = 0, wait_for_commit = 0;
1834 tid_t tid;
1835 pid_t pid;
1836
1837 if (--handle->h_ref > 0) {
1838 jbd2_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1839 handle->h_ref);
1840 if (is_handle_aborted(handle))
1841 return -EIO;
1842 return 0;
1843 }
1844 if (!transaction) {
1845 /*
1846 * Handle is already detached from the transaction so there is
1847 * nothing to do other than free the handle.
1848 */
1849 memalloc_nofs_restore(handle->saved_alloc_context);
1850 goto free_and_exit;
1851 }
1852 journal = transaction->t_journal;
1853 tid = transaction->t_tid;
1854
1855 if (is_handle_aborted(handle))
1856 err = -EIO;
1857
1858 jbd2_debug(4, "Handle %p going down\n", handle);
1859 trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
1860 tid, handle->h_type, handle->h_line_no,
1861 jiffies - handle->h_start_jiffies,
1862 handle->h_sync, handle->h_requested_credits,
1863 (handle->h_requested_credits -
1864 handle->h_total_credits));
1865
1866 /*
1867 * Implement synchronous transaction batching. If the handle
1868 * was synchronous, don't force a commit immediately. Let's
1869 * yield and let another thread piggyback onto this
1870 * transaction. Keep doing that while new threads continue to
1871 * arrive. It doesn't cost much - we're about to run a commit
1872 * and sleep on IO anyway. Speeds up many-threaded, many-dir
1873 * operations by 30x or more...
1874 *
1875 * We try to optimize the sleep time against what the
1876 * underlying disk can do, instead of having a static sleep
1877 * time. This is useful for the case where our storage is so
1878 * fast that it is better to go ahead and force a flush
1879 * and wait for the transaction to be committed than it is to
1880 * wait for an arbitrary amount of time for new writers to
1881 * join the transaction. We achieve this by measuring how
1882 * long it takes to commit a transaction, and compare it with
1883 * how long this transaction has been running, and if run time
1884 * < commit time then we sleep for the delta and commit. This
1885 * greatly helps super fast disks that would see slowdowns as
1886 * more threads started doing fsyncs.
1887 *
1888 * But don't do this if this process was the most recent one
1889 * to perform a synchronous write. We do this to detect the
1890 * case where a single process is doing a stream of sync
1891 * writes. No point in waiting for joiners in that case.
1892 *
1893 * Setting max_batch_time to 0 disables this completely.
1894 */
1895 pid = current->pid;
1896 if (handle->h_sync && journal->j_last_sync_writer != pid &&
1897 journal->j_max_batch_time) {
1898 u64 commit_time, trans_time;
1899
1900 journal->j_last_sync_writer = pid;
1901
1902 read_lock(&journal->j_state_lock);
1903 commit_time = journal->j_average_commit_time;
1904 read_unlock(&journal->j_state_lock);
1905
1906 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1907 transaction->t_start_time));
1908
1909 commit_time = max_t(u64, commit_time,
1910 1000*journal->j_min_batch_time);
1911 commit_time = min_t(u64, commit_time,
1912 1000*journal->j_max_batch_time);
1913
1914 if (trans_time < commit_time) {
1915 ktime_t expires = ktime_add_ns(ktime_get(),
1916 commit_time);
1917 set_current_state(TASK_UNINTERRUPTIBLE);
1918 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1919 }
1920 }
1921
1922 if (handle->h_sync)
1923 transaction->t_synchronous_commit = 1;
1924
1925 /*
1926 * If the handle is marked SYNC, we need to set another commit
1927 * going! We also want to force a commit if the transaction is too
1928 * old now.
1929 */
1930 if (handle->h_sync ||
1931 time_after_eq(jiffies, transaction->t_expires)) {
1932 /* Do this even for aborted journals: an abort still
1933 * completes the commit thread, it just doesn't write
1934 * anything to disk. */
1935
1936 jbd2_debug(2, "transaction too old, requesting commit for "
1937 "handle %p\n", handle);
1938 /* This is non-blocking */
1939 jbd2_log_start_commit(journal, tid);
1940
1941 /*
1942 * Special case: JBD2_SYNC synchronous updates require us
1943 * to wait for the commit to complete.
1944 */
1945 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1946 wait_for_commit = 1;
1947 }
1948
1949 /*
1950 * Once stop_this_handle() drops t_updates, the transaction could start
1951 * committing on us and eventually disappear. So we must not
1952 * dereference transaction pointer again after calling
1953 * stop_this_handle().
1954 */
1955 stop_this_handle(handle);
1956
1957 if (wait_for_commit)
1958 err = jbd2_log_wait_commit(journal, tid);
1959
1960 free_and_exit:
1961 if (handle->h_rsv_handle)
1962 jbd2_free_handle(handle->h_rsv_handle);
1963 jbd2_free_handle(handle);
1964 return err;
1965 }
1966
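/*
 * Illustrative sketch, not part of this file: the canonical
 * open/modify/close pattern that ends in jbd2_journal_stop(). The
 * do_modifications() helper and the datasync flag are hypothetical;
 * setting h_sync before stopping is what engages the synchronous
 * batching logic above:
 *
 *	handle_t *handle = jbd2_journal_start(journal, credits);
 *	int err, err2;
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = do_modifications(handle);
 *	if (datasync)
 *		handle->h_sync = 1;
 *	err2 = jbd2_journal_stop(handle);
 *	if (!err)
 *		err = err2;
 */
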
1967 /*
1968 *
1969 * List management code snippets: various functions for manipulating the
1970 * transaction buffer lists.
1971 *
1972 */
1973
1974 /*
1975 * Append a buffer to a transaction list, given the transaction's list head
1976 * pointer.
1977 *
1978 * j_list_lock is held.
1979 *
1980 * jh->b_state_lock is held.
1981 */
1982
1983 static inline void
1984 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1985 {
1986 if (!*list) {
1987 jh->b_tnext = jh->b_tprev = jh;
1988 *list = jh;
1989 } else {
1990 /* Insert at the tail of the list to preserve order */
1991 struct journal_head *first = *list, *last = first->b_tprev;
1992 jh->b_tprev = last;
1993 jh->b_tnext = first;
1994 last->b_tnext = first->b_tprev = jh;
1995 }
1996 }
1997
1998 /*
1999 * Remove a buffer from a transaction list, given the transaction's list
2000 * head pointer.
2001 *
2002 * Called with j_list_lock held, and the journal may not be locked.
2003 *
2004 * jh->b_state_lock is held.
2005 */
2006
2007 static inline void
2008 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
2009 {
2010 if (*list == jh) {
2011 *list = jh->b_tnext;
2012 if (*list == jh)
2013 *list = NULL;
2014 }
2015 jh->b_tprev->b_tnext = jh->b_tnext;
2016 jh->b_tnext->b_tprev = jh->b_tprev;
2017 }
2018
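/*
 * Illustrative sketch, not part of this file: these transaction lists
 * are circular and doubly linked through b_tnext/b_tprev, with *list
 * pointing at the head (or NULL when empty), so a full traversal looks
 * like the loop below; process() is a placeholder:
 *
 *	struct journal_head *jh = *list;
 *
 *	if (jh) {
 *		do {
 *			process(jh);
 *			jh = jh->b_tnext;
 *		} while (jh != *list);
 *	}
 */
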
2019 /*
2020 * Remove a buffer from the appropriate transaction list.
2021 *
2022 * Note that this function can *change* the value of
2023 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
2024 * t_reserved_list. If the caller is holding onto a copy of one of these
2025 * pointers, it could go bad. Generally the caller needs to re-read the
2026 * pointer from the transaction_t.
2027 *
2028 * Called under j_list_lock.
2029 */
2030 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
2031 {
2032 struct journal_head **list = NULL;
2033 transaction_t *transaction;
2034 struct buffer_head *bh = jh2bh(jh);
2035
2036 lockdep_assert_held(&jh->b_state_lock);
2037 transaction = jh->b_transaction;
2038 if (transaction)
2039 assert_spin_locked(&transaction->t_journal->j_list_lock);
2040
2041 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2042 if (jh->b_jlist != BJ_None)
2043 J_ASSERT_JH(jh, transaction != NULL);
2044
2045 switch (jh->b_jlist) {
2046 case BJ_None:
2047 return;
2048 case BJ_Metadata:
2049 transaction->t_nr_buffers--;
2050 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
2051 list = &transaction->t_buffers;
2052 break;
2053 case BJ_Forget:
2054 list = &transaction->t_forget;
2055 break;
2056 case BJ_Shadow:
2057 list = &transaction->t_shadow_list;
2058 break;
2059 case BJ_Reserved:
2060 list = &transaction->t_reserved_list;
2061 break;
2062 }
2063
2064 __blist_del_buffer(list, jh);
2065 jh->b_jlist = BJ_None;
2066 if (transaction && is_journal_aborted(transaction->t_journal))
2067 clear_buffer_jbddirty(bh);
2068 else if (test_clear_buffer_jbddirty(bh))
2069 mark_buffer_dirty(bh); /* Expose it to the VM */
2070 }
2071
2072 /*
2073 * Remove buffer from all transactions. The caller is responsible for dropping
2074 * the jh reference that belonged to the transaction.
2075 *
2076 * Called with the bh_state lock and j_list_lock held.
2077 */
2078 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
2079 {
2080 J_ASSERT_JH(jh, jh->b_transaction != NULL);
2081 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
2082
2083 __jbd2_journal_temp_unlink_buffer(jh);
2084 jh->b_transaction = NULL;
2085 }
2086
2087 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
2088 {
2089 struct buffer_head *bh = jh2bh(jh);
2090
2091 /* Get reference so that buffer cannot be freed before we unlock it */
2092 get_bh(bh);
2093 spin_lock(&jh->b_state_lock);
2094 spin_lock(&journal->j_list_lock);
2095 __jbd2_journal_unfile_buffer(jh);
2096 spin_unlock(&journal->j_list_lock);
2097 spin_unlock(&jh->b_state_lock);
2098 jbd2_journal_put_journal_head(jh);
2099 __brelse(bh);
2100 }
2101
2102 /*
2103 * Called from jbd2_journal_try_to_free_buffers().
2104 *
2105 * Called under jh->b_state_lock
2106 */
2107 static void
2108 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
2109 {
2110 struct journal_head *jh;
2111
2112 jh = bh2jh(bh);
2113
2114 if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
2115 return;
2116
2117 spin_lock(&journal->j_list_lock);
2118 /* Remove written-back checkpointed metadata buffer */
2119 if (jh->b_cp_transaction != NULL)
2120 jbd2_journal_try_remove_checkpoint(jh);
2121 spin_unlock(&journal->j_list_lock);
2122 return;
2123 }
2124
2125 /**
2126 * jbd2_journal_try_to_free_buffers() - try to free page buffers.
2127 * @journal: journal for operation
2128 * @folio: Folio to detach data from.
2129 *
2130 * For all the buffers on this page,
2131 * if they are fully written out ordered data, move them onto BUF_CLEAN
2132 * so try_to_free_buffers() can reap them.
2133 *
2134 * This function returns true if we wish try_to_free_buffers()
2135 * to be called. We do this if the page is releasable by try_to_free_buffers().
2136 * We also do it if the page has locked or dirty buffers and the caller wants
2137 * us to perform sync or async writeout.
2138 *
2139 * This complicates JBD locking somewhat. We aren't protected by the
2140 * BKL here. We wish to remove the buffer from its committing or
2141 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
2142 *
2143 * This may *change* the value of transaction_t->t_datalist, so anyone
2144 * who looks at t_datalist needs to lock against this function.
2145 *
2146 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
2147 * buffer. So we need to lock against that. jbd2_journal_dirty_data()
2148 * will come out of the lock with the buffer dirty, which makes it
2149 * ineligible for release here.
2150 *
2151 * Who else is affected by this? hmm... Really the only contender
2152 * is do_get_write_access() - it could be looking at the buffer while
2153 * journal_try_to_free_buffer() is changing its state. But that
2154 * cannot happen because we never reallocate freed data as metadata
2155 * while the data is part of a transaction. Yes?
2156 *
2157 * Return false on failure, true on success
2158 */
2159 bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
2160 {
2161 struct buffer_head *head;
2162 struct buffer_head *bh;
2163 bool ret = false;
2164
2165 J_ASSERT(folio_test_locked(folio));
2166
2167 head = folio_buffers(folio);
2168 bh = head;
2169 do {
2170 struct journal_head *jh;
2171
2172 /*
2173 * We take our own ref against the journal_head here to avoid
2174 * having to add tons of locking around each instance of
2175 * jbd2_journal_put_journal_head().
2176 */
2177 jh = jbd2_journal_grab_journal_head(bh);
2178 if (!jh)
2179 continue;
2180
2181 spin_lock(&jh->b_state_lock);
2182 __journal_try_to_free_buffer(journal, bh);
2183 spin_unlock(&jh->b_state_lock);
2184 jbd2_journal_put_journal_head(jh);
2185 if (buffer_jbd(bh))
2186 goto busy;
2187 } while ((bh = bh->b_this_page) != head);
2188
2189 ret = try_to_free_buffers(folio);
2190 busy:
2191 return ret;
2192 }
2193
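/*
 * Illustrative sketch, not part of this file: a filesystem would
 * typically call the above from its ->release_folio address_space
 * operation. get_journal() is a hypothetical accessor for the inode's
 * journal; the signature matches the aops callback:
 *
 *	static bool my_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		journal_t *journal = get_journal(folio->mapping->host);
 *
 *		if (!journal)
 *			return try_to_free_buffers(folio);
 *		return jbd2_journal_try_to_free_buffers(journal, folio);
 *	}
 */
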
2194 /*
2195 * This buffer is no longer needed. If it is on an older transaction's
2196 * checkpoint list we need to record it on this transaction's forget list
2197 * to pin this buffer (and hence its checkpointing transaction) down until
2198 * this transaction commits. If the buffer isn't on a checkpoint list, we
2199 * release it.
2200 * Returns non-zero if JBD no longer has an interest in the buffer.
2201 *
2202 * Called under j_list_lock.
2203 *
2204 * Called under jh->b_state_lock.
2205 */
2206 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
2207 {
2208 int may_free = 1;
2209 struct buffer_head *bh = jh2bh(jh);
2210
2211 if (jh->b_cp_transaction) {
2212 JBUFFER_TRACE(jh, "on running+cp transaction");
2213 __jbd2_journal_temp_unlink_buffer(jh);
2214 /*
2215 * We don't want to write the buffer anymore, clear the
2216 * bit so that we don't confuse checks in
2217 * __journal_file_buffer
2218 */
2219 clear_buffer_dirty(bh);
2220 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
2221 may_free = 0;
2222 } else {
2223 JBUFFER_TRACE(jh, "on running transaction");
2224 __jbd2_journal_unfile_buffer(jh);
2225 jbd2_journal_put_journal_head(jh);
2226 }
2227 return may_free;
2228 }
2229
2230 /*
2231 * jbd2_journal_invalidate_folio
2232 *
2233 * This code is tricky. It has a number of cases to deal with.
2234 *
2235 * There are two invariants which this code relies on:
2236 *
2237 * i_size must be updated on disk before we start calling invalidate_folio
2238 * on the data.
2239 *
2240 * This is done in ext3 by defining an ext3_setattr method which
2241 * updates i_size before truncate gets going. By maintaining this
2242 * invariant, we can be sure that it is safe to throw away any buffers
2243 * attached to the current transaction: once the transaction commits,
2244 * we know that the data will not be needed.
2245 *
2246 * Note however that we can *not* throw away data belonging to the
2247 * previous, committing transaction!
2248 *
2249 * Any disk blocks which *are* part of the previous, committing
2250 * transaction (and which therefore cannot be discarded immediately) are
2251 * not going to be reused in the new running transaction.
2252 *
2253 * The bitmap committed_data images guarantee this: any block which is
2254 * allocated in one transaction and removed in the next will be marked
2255 * as in-use in the committed_data bitmap, so cannot be reused until
2256 * the next transaction to delete the block commits. This means that
2257 * leaving committing buffers dirty is quite safe: the disk blocks
2258 * cannot be reallocated to a different file and so buffer aliasing is
2259 * not possible.
2260 *
2261 *
2262 * The above applies mainly to ordered data mode. In writeback mode we
2263 * don't make guarantees about the order in which data hits disk --- in
2264 * particular we don't guarantee that new dirty data is flushed before
2265 * transaction commit --- so it is always safe just to discard data
2266 * immediately in that mode. --sct
2267 */
2268
2269 /*
2270 * The journal_unmap_buffer helper function returns zero if the buffer
2271 * concerned remains pinned as an anonymous buffer belonging to an older
2272 * transaction.
2273 *
2274 * We're outside-transaction here. Either or both of j_running_transaction
2275 * and j_committing_transaction may be NULL.
2276 */
2277 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2278 int partial_page)
2279 {
2280 transaction_t *transaction;
2281 struct journal_head *jh;
2282 int may_free = 1;
2283
2284 BUFFER_TRACE(bh, "entry");
2285
2286 /*
2287 * It is safe to proceed here without the j_list_lock because the
2288 * buffers cannot be stolen by try_to_free_buffers as long as we are
2289 * holding the page lock. --sct
2290 */
2291
2292 jh = jbd2_journal_grab_journal_head(bh);
2293 if (!jh)
2294 goto zap_buffer_unlocked;
2295
2296 /* OK, we have data buffer in journaled mode */
2297 write_lock(&journal->j_state_lock);
2298 spin_lock(&jh->b_state_lock);
2299 spin_lock(&journal->j_list_lock);
2300
2301 /*
2302 * We cannot remove the buffer from checkpoint lists until the
2303 * transaction adding the inode to the orphan list (let's call it T)
2304 * is committed. Otherwise, if the transaction changing the
2305 * buffer were cleaned from the journal before T is
2306 * committed, a crash would cause the correct contents of
2307 * the buffer to be lost. On the other hand, we have to
2308 * clear the buffer dirty bit no later than the moment when the
2309 * transaction marking the buffer as freed in the filesystem
2310 * structures is committed, because from that moment on the
2311 * block can be reallocated and used by a different page.
2312 * Since the block hasn't been freed yet but the inode has
2313 * already been added to orphan list, it is safe for us to add
2314 * the buffer to BJ_Forget list of the newest transaction.
2315 *
2316 * Also we have to clear buffer_mapped flag of a truncated buffer
2317 * because the buffer_head may be attached to the page straddling
2318 * i_size (can happen only when blocksize < pagesize) and thus the
2319 * buffer_head can be reused when the file is extended again. So we end
2320 * up keeping around invalidated buffers attached to transactions'
2321 * BJ_Forget list just to stop checkpointing code from cleaning up
2322 * the transaction this buffer was modified in.
2323 */
2324 transaction = jh->b_transaction;
2325 if (transaction == NULL) {
2326 /* First case: not on any transaction. If it
2327 * has no checkpoint link, then we can zap it:
2328 * it's a writeback-mode buffer so we don't care
2329 * if it hits disk safely. */
2330 if (!jh->b_cp_transaction) {
2331 JBUFFER_TRACE(jh, "not on any transaction: zap");
2332 goto zap_buffer;
2333 }
2334
2335 if (!buffer_dirty(bh)) {
2336 /* bdflush has written it. We can drop it now */
2337 __jbd2_journal_remove_checkpoint(jh);
2338 goto zap_buffer;
2339 }
2340
2341 /* OK, it must be in the journal but still not
2342 * written fully to disk: it's metadata or
2343 * journaled data... */
2344
2345 if (journal->j_running_transaction) {
2346 /* ... and once the current transaction has
2347 * committed, the buffer won't be needed any
2348 * longer. */
2349 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
2350 may_free = __dispose_buffer(jh,
2351 journal->j_running_transaction);
2352 goto zap_buffer;
2353 } else {
2354 /* There is no currently-running transaction. So the
2355 * orphan record which we wrote for this file must have
2356 * passed into commit. We must attach this buffer to
2357 * the committing transaction, if it exists. */
2358 if (journal->j_committing_transaction) {
2359 JBUFFER_TRACE(jh, "give to committing trans");
2360 may_free = __dispose_buffer(jh,
2361 journal->j_committing_transaction);
2362 goto zap_buffer;
2363 } else {
2364 /* The orphan record's transaction has
2365 * committed. We can cleanse this buffer */
2366 clear_buffer_jbddirty(bh);
2367 __jbd2_journal_remove_checkpoint(jh);
2368 goto zap_buffer;
2369 }
2370 }
2371 } else if (transaction == journal->j_committing_transaction) {
2372 JBUFFER_TRACE(jh, "on committing transaction");
2373 /*
2374 * The buffer is committing, we simply cannot touch
2375 * it. If the page is straddling i_size we have to wait
2376 * for commit and try again.
2377 */
2378 if (partial_page) {
2379 spin_unlock(&journal->j_list_lock);
2380 spin_unlock(&jh->b_state_lock);
2381 write_unlock(&journal->j_state_lock);
2382 jbd2_journal_put_journal_head(jh);
2383 /* Already zapped buffer? Nothing to do... */
2384 if (!bh->b_bdev)
2385 return 0;
2386 return -EBUSY;
2387 }
2388 /*
2389 * OK, buffer won't be reachable after truncate. We just clear
2390 * b_modified to not confuse transaction credit accounting, and
2391 * set j_next_transaction to the running transaction (if there
2392 * is one) and mark buffer as freed so that commit code knows
2393 * it should clear dirty bits when it is done with the buffer.
2394 */
2395 set_buffer_freed(bh);
2396 if (journal->j_running_transaction && buffer_jbddirty(bh))
2397 jh->b_next_transaction = journal->j_running_transaction;
2398 jh->b_modified = 0;
2399 spin_unlock(&journal->j_list_lock);
2400 spin_unlock(&jh->b_state_lock);
2401 write_unlock(&journal->j_state_lock);
2402 jbd2_journal_put_journal_head(jh);
2403 return 0;
2404 } else {
2405 /* Good, the buffer belongs to the running transaction.
2406 * We are writing our own transaction's data, not any
2407 * previous one's, so it is safe to throw it away
2408 * (remember that we expect the filesystem to have set
2409 * i_size already for this truncate so recovery will not
2410 * expose the disk blocks we are discarding here.) */
2411 J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
2412 JBUFFER_TRACE(jh, "on running transaction");
2413 may_free = __dispose_buffer(jh, transaction);
2414 }
2415
2416 zap_buffer:
2417 /*
2418 * This is tricky. Although the buffer is truncated, it may be reused
2419 * if blocksize < pagesize and it is attached to the page straddling
2420 * EOF. Since the buffer might have been added to BJ_Forget list of the
2421 * running transaction, journal_get_write_access() won't clear
2422 * b_modified and credit accounting gets confused. So clear b_modified
2423 * here.
2424 */
2425 jh->b_modified = 0;
2426 spin_unlock(&journal->j_list_lock);
2427 spin_unlock(&jh->b_state_lock);
2428 write_unlock(&journal->j_state_lock);
2429 jbd2_journal_put_journal_head(jh);
2430 zap_buffer_unlocked:
2431 clear_buffer_dirty(bh);
2432 J_ASSERT_BH(bh, !buffer_jbddirty(bh));
2433 clear_buffer_mapped(bh);
2434 clear_buffer_req(bh);
2435 clear_buffer_new(bh);
2436 clear_buffer_delay(bh);
2437 clear_buffer_unwritten(bh);
2438 bh->b_bdev = NULL;
2439 return may_free;
2440 }
2441
2442 /**
2443 * jbd2_journal_invalidate_folio() - invalidate journaled buffers in a folio range
2444 * @journal: journal to use for flush...
2445 * @folio: folio to flush
2446 * @offset: start of the range to invalidate
2447 * @length: length of the range to invalidate
2448 *
2449 * Reap page buffers containing data in the specified range of the folio.
2450 * Can return -EBUSY if buffers are part of the committing transaction and
2451 * the page is straddling i_size. Caller then has to wait for current commit
2452 * and try again.
2453 */
2454 int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
2455 size_t offset, size_t length)
2456 {
2457 struct buffer_head *head, *bh, *next;
2458 unsigned int stop = offset + length;
2459 unsigned int curr_off = 0;
2460 int partial_page = (offset || length < folio_size(folio));
2461 int may_free = 1;
2462 int ret = 0;
2463
2464 if (!folio_test_locked(folio))
2465 BUG();
2466 head = folio_buffers(folio);
2467 if (!head)
2468 return 0;
2469
2470 BUG_ON(stop > folio_size(folio) || stop < length);
2471
2472 /* We will potentially be playing with lists other than just the
2473 * data lists (especially for journaled data mode), so be
2474 * cautious in our locking. */
2475
2476 bh = head;
2477 do {
2478 unsigned int next_off = curr_off + bh->b_size;
2479 next = bh->b_this_page;
2480
2481 if (next_off > stop)
2482 return 0;
2483
2484 if (offset <= curr_off) {
2485 /* This block is wholly outside the truncation point */
2486 lock_buffer(bh);
2487 ret = journal_unmap_buffer(journal, bh, partial_page);
2488 unlock_buffer(bh);
2489 if (ret < 0)
2490 return ret;
2491 may_free &= ret;
2492 }
2493 curr_off = next_off;
2494 bh = next;
2495
2496 } while (bh != head);
2497
2498 if (!partial_page) {
2499 if (may_free && try_to_free_buffers(folio))
2500 J_ASSERT(!folio_buffers(folio));
2501 }
2502 return 0;
2503 }
2504
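/*
 * Illustrative sketch, not part of this file: a ->invalidate_folio hook
 * built on the above. On -EBUSY the caller must wait for the committing
 * transaction and retry; get_journal() and my_wait_for_commit() stand
 * in for the filesystem-specific pieces:
 *
 *	static void my_invalidate_folio(struct folio *folio, size_t offset,
 *					size_t length)
 *	{
 *		journal_t *journal = get_journal(folio->mapping->host);
 *
 *		while (jbd2_journal_invalidate_folio(journal, folio,
 *						     offset, length) == -EBUSY)
 *			my_wait_for_commit(journal);
 *	}
 */
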
2505 /*
2506 * File a buffer on the given transaction list.
2507 */
2508 void __jbd2_journal_file_buffer(struct journal_head *jh,
2509 transaction_t *transaction, int jlist)
2510 {
2511 struct journal_head **list = NULL;
2512 int was_dirty = 0;
2513 struct buffer_head *bh = jh2bh(jh);
2514
2515 lockdep_assert_held(&jh->b_state_lock);
2516 assert_spin_locked(&transaction->t_journal->j_list_lock);
2517
2518 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2519 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
2520 jh->b_transaction == NULL);
2521
2522 if (jh->b_transaction && jh->b_jlist == jlist)
2523 return;
2524
2525 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
2526 jlist == BJ_Shadow || jlist == BJ_Forget) {
2527 /*
2528 * For metadata buffers, we track dirty bit in buffer_jbddirty
2529 * instead of buffer_dirty. We should not see a dirty bit set
2530 * here because we clear it in do_get_write_access, but e.g.
2531 * tune2fs can modify the sb and set the dirty bit at any time,
2532 * so we try to handle that gracefully.
2533 */
2534 if (buffer_dirty(bh))
2535 warn_dirty_buffer(bh);
2536 if (test_clear_buffer_dirty(bh) ||
2537 test_clear_buffer_jbddirty(bh))
2538 was_dirty = 1;
2539 }
2540
2541 if (jh->b_transaction)
2542 __jbd2_journal_temp_unlink_buffer(jh);
2543 else
2544 jbd2_journal_grab_journal_head(bh);
2545 jh->b_transaction = transaction;
2546
2547 switch (jlist) {
2548 case BJ_None:
2549 J_ASSERT_JH(jh, !jh->b_committed_data);
2550 J_ASSERT_JH(jh, !jh->b_frozen_data);
2551 return;
2552 case BJ_Metadata:
2553 transaction->t_nr_buffers++;
2554 list = &transaction->t_buffers;
2555 break;
2556 case BJ_Forget:
2557 list = &transaction->t_forget;
2558 break;
2559 case BJ_Shadow:
2560 list = &transaction->t_shadow_list;
2561 break;
2562 case BJ_Reserved:
2563 list = &transaction->t_reserved_list;
2564 break;
2565 }
2566
2567 __blist_add_buffer(list, jh);
2568 jh->b_jlist = jlist;
2569
2570 if (was_dirty)
2571 set_buffer_jbddirty(bh);
2572 }
2573
2574 void jbd2_journal_file_buffer(struct journal_head *jh,
2575 transaction_t *transaction, int jlist)
2576 {
2577 spin_lock(&jh->b_state_lock);
2578 spin_lock(&transaction->t_journal->j_list_lock);
2579 __jbd2_journal_file_buffer(jh, transaction, jlist);
2580 spin_unlock(&transaction->t_journal->j_list_lock);
2581 spin_unlock(&jh->b_state_lock);
2582 }
2583
2584 /*
2585 * Remove a buffer from its current buffer list in preparation for
2586 * dropping it from its current transaction entirely. If the buffer has
2587 * already started to be used by a subsequent transaction, refile the
2588 * buffer on that transaction's metadata list.
2589 *
2590 * Called under j_list_lock
2591 * Called under jh->b_state_lock
2592 *
2593 * When this function returns true, there's no next transaction to refile to
2594 * and the caller has to drop jh reference through
2595 * jbd2_journal_put_journal_head().
2596 */
2597 bool __jbd2_journal_refile_buffer(struct journal_head *jh)
2598 {
2599 int was_dirty, jlist;
2600 struct buffer_head *bh = jh2bh(jh);
2601
2602 lockdep_assert_held(&jh->b_state_lock);
2603 if (jh->b_transaction)
2604 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2605
2606 /* If the buffer is now unused, just drop it. */
2607 if (jh->b_next_transaction == NULL) {
2608 __jbd2_journal_unfile_buffer(jh);
2609 return true;
2610 }
2611
2612 /*
2613 * It has been modified by a later transaction: add it to the new
2614 * transaction's metadata list.
2615 */
2616
2617 was_dirty = test_clear_buffer_jbddirty(bh);
2618 __jbd2_journal_temp_unlink_buffer(jh);
2619
2620 /*
2621 * b_transaction must be set; otherwise the new b_transaction won't
2622 * be holding the jh reference.
2623 */
2624 J_ASSERT_JH(jh, jh->b_transaction != NULL);
2625
2626 /*
2627 * We set b_transaction here because b_next_transaction will inherit
2628 * our jh reference and thus __jbd2_journal_file_buffer() must not
2629 * take a new one.
2630 */
2631 WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
2632 WRITE_ONCE(jh->b_next_transaction, NULL);
2633 if (buffer_freed(bh))
2634 jlist = BJ_Forget;
2635 else if (jh->b_modified)
2636 jlist = BJ_Metadata;
2637 else
2638 jlist = BJ_Reserved;
2639 __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2640 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2641
2642 if (was_dirty)
2643 set_buffer_jbddirty(bh);
2644 return false;
2645 }
2646
2647 /*
2648 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2649 * bh reference so that we can safely unlock bh.
2650 *
2651 * The jh and bh may be freed by this call.
2652 */
2653 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2654 {
2655 bool drop;
2656
2657 spin_lock(&jh->b_state_lock);
2658 spin_lock(&journal->j_list_lock);
2659 drop = __jbd2_journal_refile_buffer(jh);
2660 spin_unlock(&jh->b_state_lock);
2661 spin_unlock(&journal->j_list_lock);
2662 if (drop)
2663 jbd2_journal_put_journal_head(jh);
2664 }
2665
2666 /*
2667 * File an inode on the inode list of the handle's transaction.
2668 */
2669 static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
2670 unsigned long flags, loff_t start_byte, loff_t end_byte)
2671 {
2672 transaction_t *transaction = handle->h_transaction;
2673 journal_t *journal;
2674
2675 if (is_handle_aborted(handle))
2676 return -EROFS;
2677 journal = transaction->t_journal;
2678
2679 jbd2_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2680 transaction->t_tid);
2681
2682 spin_lock(&journal->j_list_lock);
2683 jinode->i_flags |= flags;
2684
2685 if (jinode->i_dirty_end) {
2686 jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
2687 jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
2688 } else {
2689 jinode->i_dirty_start = start_byte;
2690 jinode->i_dirty_end = end_byte;
2691 }
2692
2693 /* Is inode already attached where we need it? */
2694 if (jinode->i_transaction == transaction ||
2695 jinode->i_next_transaction == transaction)
2696 goto done;
2697
2698 /*
2699 * We only ever set this variable to 1, so the test is safe. Since
2700 * t_need_data_flush is likely to be set, we do the test to save some
2701 * cacheline bouncing.
2702 */
2703 if (!transaction->t_need_data_flush)
2704 transaction->t_need_data_flush = 1;
2705 /* On some different transaction's list - should be
2706 * the committing one */
2707 if (jinode->i_transaction) {
2708 J_ASSERT(jinode->i_next_transaction == NULL);
2709 J_ASSERT(jinode->i_transaction ==
2710 journal->j_committing_transaction);
2711 jinode->i_next_transaction = transaction;
2712 goto done;
2713 }
2714 /* Not on any transaction list... */
2715 J_ASSERT(!jinode->i_next_transaction);
2716 jinode->i_transaction = transaction;
2717 list_add(&jinode->i_list, &transaction->t_inode_list);
2718 done:
2719 spin_unlock(&journal->j_list_lock);
2720
2721 return 0;
2722 }
2723
2724 int jbd2_journal_inode_ranged_write(handle_t *handle,
2725 struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
2726 {
2727 return jbd2_journal_file_inode(handle, jinode,
2728 JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
2729 start_byte + length - 1);
2730 }
2731
2732 int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
2733 loff_t start_byte, loff_t length)
2734 {
2735 return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
2736 start_byte, start_byte + length - 1);
2737 }
2738
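/*
 * Illustrative sketch, not part of this file: in ordered mode a
 * filesystem tells jbd2 which byte range of an inode must reach disk
 * before the running transaction commits, typically from its write
 * path. The ei->jinode field is a hypothetical struct jbd2_inode
 * embedded in the fs-private inode:
 *
 *	err = jbd2_journal_inode_ranged_write(handle, &ei->jinode,
 *					      pos, len);
 */
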
2739 /*
2740 * File truncate and transaction commit interact with each other in a
2741 * non-trivial way. If a transaction writing data block A is
2742 * committing, we cannot discard the data by truncate until we have
2743 * written it out. Otherwise, if we crashed after the transaction doing
2744 * the write committed but before the transaction doing the truncate
2745 * committed, we could see stale data in block A. This function is a
2746 * helper to solve this problem. It starts writeout of the truncated
2747 * part in case it is in the committing transaction.
2748 *
2749 * Filesystem code must call this function when the inode is journaled
2750 * in ordered mode, before truncation happens and after the inode has been
2751 * placed on the orphan list with the new inode size. The second condition
2752 * avoids the race where someone writes new data and we start
2753 * committing the transaction after this function has been called but
2754 * before a transaction for truncate is started (and furthermore it
2755 * allows us to optimize the case where the addition to orphan list
2756 * happens in the same transaction as write --- we don't have to write
2757 * any data in such case).
2758 */
2759 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2760 struct jbd2_inode *jinode,
2761 loff_t new_size)
2762 {
2763 transaction_t *inode_trans, *commit_trans;
2764 int ret = 0;
2765
2766 /* This is a quick check to avoid locking if not necessary */
2767 if (!jinode->i_transaction)
2768 goto out;
2769 /* The locks here just force reading of recent values; it is
2770 * enough that the transaction was not committing before we started
2771 * a transaction adding the inode to the orphan list */
2772 read_lock(&journal->j_state_lock);
2773 commit_trans = journal->j_committing_transaction;
2774 read_unlock(&journal->j_state_lock);
2775 spin_lock(&journal->j_list_lock);
2776 inode_trans = jinode->i_transaction;
2777 spin_unlock(&journal->j_list_lock);
2778 if (inode_trans == commit_trans) {
2779 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2780 new_size, LLONG_MAX);
2781 if (ret)
2782 jbd2_journal_abort(journal, ret);
2783 }
2784 out:
2785 return ret;
2786 }
2787
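/*
 * Illustrative sketch, not part of this file: the call order a
 * filesystem follows when truncating in ordered mode, per the comment
 * above. The inode goes on the orphan list with the new size first;
 * my_add_orphan() and ei->jinode are hypothetical:
 *
 *	err = my_add_orphan(handle, inode, new_size);
 *	if (!err)
 *		err = jbd2_journal_begin_ordered_truncate(journal,
 *							  &ei->jinode,
 *							  new_size);
 *	if (!err)
 *		truncate_pagecache(inode, new_size);
 */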