// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
        J_ASSERT(!transaction_cache);
        transaction_cache = kmem_cache_create("jbd2_transaction_s",
                                              sizeof(transaction_t),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                              NULL);
        if (!transaction_cache) {
                pr_emerg("JBD2: failed to create transaction cache\n");
                return -ENOMEM;
        }
        return 0;
}

void jbd2_journal_destroy_transaction_cache(void)
{
        kmem_cache_destroy(transaction_cache);
        transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
                return;
        kmem_cache_free(transaction_cache, transaction);
}

/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
        int tag_space = journal->j_blocksize - sizeof(journal_header_t);
        int tags_per_block;

        /* Subtract UUID */
        tag_space -= 16;
        if (jbd2_journal_has_csum_v2or3(journal))
                tag_space -= sizeof(struct jbd2_journal_block_tail);
        /* Commit code leaves a slack space of 16 bytes at the end of block */
        tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
        /*
         * Revoke descriptors are accounted separately so we need to reserve
         * space for commit block and normal transaction descriptor blocks.
         */
        return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
                                tags_per_block);
}
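
/*
 * Worked example (illustrative only; the exact sizes depend on which
 * journal features are enabled): with a 4096-byte journal block, a
 * 12-byte journal_header_t and 16-byte block tags,
 *
 *        tag_space      = 4096 - 12 - 16 (UUID)  = 4068
 *        tags_per_block = (4068 - 16) / 16       = 253
 *
 * so a journal with j_max_transaction_buffers == 8192 would reserve
 * 1 + DIV_ROUND_UP(8192, 253) = 34 descriptor blocks per transaction.
 */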

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction: set it up in RUNNING state and
 * add it to the current journal (which should not have an existing
 * running transaction: we only make a new transaction once we have
 * started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static void jbd2_get_transaction(journal_t *journal,
                                 transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits,
                   jbd2_descriptor_blocks_per_trans(journal) +
                   atomic_read(&journal->j_reserved_credits));
        atomic_set(&transaction->t_outstanding_revokes, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;
        transaction->t_requested = 0;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
                                     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                spin_lock(&transaction->t_handle_lock);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
                spin_unlock(&transaction->t_handle_lock);
        }
#endif
}

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);
        int need_to_start;
        tid_t tid = journal->j_running_transaction->t_tid;

        prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
                                  TASK_UNINTERRUPTIBLE);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        jbd2_might_wait_for_commit(journal);
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);

        if (WARN_ON(!journal->j_running_transaction ||
                    journal->j_running_transaction->t_state != T_SWITCH)) {
                read_unlock(&journal->j_state_lock);
                return;
        }
        prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
                                  TASK_UNINTERRUPTIBLE);
        read_unlock(&journal->j_state_lock);
        /*
         * We don't call jbd2_might_wait_for_commit() here as there's no
         * waiting for outstanding handles happening anymore in T_SWITCH state
         * and handling of reserved handles actually relies on that for
         * correctness.
         */
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
        atomic_sub(blocks, &journal->j_reserved_credits);
        wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction. Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
                                   int rsv_blocks)
{
        transaction_t *t = journal->j_running_transaction;
        int needed;
        int total = blocks + rsv_blocks;

        /*
         * If the current transaction is locked down for commit, wait
         * for the lock to be released.
         */
        if (t->t_state != T_RUNNING) {
                WARN_ON_ONCE(t->t_state >= T_FLUSH);
                wait_transaction_locked(journal);
                return 1;
        }

        /*
         * If there is not enough space left in the log to write all
         * potential buffers requested by this operation, we need to
         * stall pending a log checkpoint to free some more log space.
         */
        needed = atomic_add_return(total, &t->t_outstanding_credits);
        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large,
                 * then start to commit it: we can then go back and
                 * attach this handle to a new transaction.
                 */
                atomic_sub(total, &t->t_outstanding_credits);

                /*
                 * Is the number of reserved credits in the current transaction
                 * too big to fit this handle? Wait until reserved credits are
                 * freed.
                 */
                if (atomic_read(&journal->j_reserved_credits) + total >
                    journal->j_max_transaction_buffers) {
                        read_unlock(&journal->j_state_lock);
                        jbd2_might_wait_for_commit(journal);
                        wait_event(journal->j_wait_reserved,
                                   atomic_read(&journal->j_reserved_credits) + total <=
                                   journal->j_max_transaction_buffers);
                        return 1;
                }

                wait_transaction_locked(journal);
                return 1;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         */
        if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                write_lock(&journal->j_state_lock);
                if (jbd2_log_space_left(journal) <
                    journal->j_max_transaction_buffers)
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                return 1;
        }

        /* No reservation? We are done... */
        if (!rsv_blocks)
                return 0;

        needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
        /* We allow at most half of a transaction to be reserved */
        if (needed > journal->j_max_transaction_buffers / 2) {
                sub_reserved_credits(journal, rsv_blocks);
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                wait_event(journal->j_wait_reserved,
                           atomic_read(&journal->j_reserved_credits) + rsv_blocks
                           <= journal->j_max_transaction_buffers / 2);
                return 1;
        }
        return 0;
}
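
/*
 * Illustrative sketch of the caller contract above (this mirrors what
 * start_this_handle() below actually does): enter with j_state_lock held
 * for reading; a non-zero return means the lock was dropped and the whole
 * attach attempt must be retried:
 *
 *        repeat:
 *                read_lock(&journal->j_state_lock);
 *                ...
 *                if (add_transaction_credits(journal, blocks, rsv_blocks))
 *                        goto repeat;
 */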

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t *transaction, *new_transaction = NULL;
        int blocks = handle->h_total_credits;
        int rsv_blocks = 0;
        unsigned long ts = jiffies;

        if (handle->h_rsv_handle)
                rsv_blocks = handle->h_rsv_handle->h_total_credits;

        /*
         * Limit the number of reserved credits to 1/2 of maximum transaction
         * size and limit the number of total credits to not exceed maximum
         * transaction size per operation.
         */
        if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
            (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
                printk(KERN_ERR "JBD2: %s wants too many credits "
                       "credits:%d rsv_credits:%d max:%d\n",
                       current->comm, blocks, rsv_blocks,
                       journal->j_max_transaction_buffers);
                WARN_ON(1);
                return -ENOSPC;
        }

alloc_transaction:
        /*
         * This check is racy but it is just an optimization of allocating a
         * new transaction early if there are high chances we'll need it. If
         * we guess wrong, we'll retry or free the unused transaction.
         */
        if (!data_race(journal->j_running_transaction)) {
                /*
                 * If __GFP_FS is not present, then we may be being called from
                 * inside the fs writeback layer, so we MUST NOT fail.
                 */
                if ((gfp_mask & __GFP_FS) == 0)
                        gfp_mask |= __GFP_NOFAIL;
                new_transaction = kmem_cache_zalloc(transaction_cache,
                                                    gfp_mask);
                if (!new_transaction)
                        return -ENOMEM;
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /*
         * Wait on the journal's transaction barrier if necessary. Specifically
         * we allow reserved handles to proceed because otherwise commit could
         * deadlock on page writeback not being able to complete.
         */
        if (!handle->h_reserved && journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                           journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction &&
                    (handle->h_reserved || !journal->j_barrier_count)) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        if (!handle->h_reserved) {
                /* We may have dropped j_state_lock - restart in that case */
                if (add_transaction_credits(journal, blocks, rsv_blocks))
                        goto repeat;
        } else {
                /*
                 * We have handle reserved so we are allowed to join T_LOCKED
                 * transaction and we don't have to check for transaction size
                 * and journal space. But we still have to wait while running
                 * transaction is being switched to a committing one as it
                 * won't wait for any handles anymore.
                 */
                if (transaction->t_state == T_SWITCH) {
                        wait_transaction_switching(journal);
                        goto repeat;
                }
                sub_reserved_credits(journal, blocks);
                handle->h_reserved = 0;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        handle->h_requested_credits = blocks;
        handle->h_revoke_credits_requested = handle->h_revoke_credits;
        handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
                  handle, blocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);
        current->journal_info = handle;

        rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
        jbd2_journal_free_transaction(new_transaction);
        /*
         * Ensure that no allocations done while the transaction is open are
         * going to recurse back to the fs layer.
         */
        handle->saved_alloc_context = memalloc_nofs_save();
        return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        handle->h_total_credits = nblocks;
        handle->h_ref = 1;

        return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
                              int revoke_records, gfp_t gfp_mask,
                              unsigned int type, unsigned int line_no)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        nblocks += DIV_ROUND_UP(revoke_records,
                                journal->j_revoke_records_per_block);
        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        if (rsv_blocks) {
                handle_t *rsv_handle;

                rsv_handle = new_handle(rsv_blocks);
                if (!rsv_handle) {
                        jbd2_free_handle(handle);
                        return ERR_PTR(-ENOMEM);
                }
                rsv_handle->h_reserved = 1;
                rsv_handle->h_journal = journal;
                handle->h_rsv_handle = rsv_handle;
        }
        handle->h_revoke_credits = revoke_records;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                if (handle->h_rsv_handle)
                        jbd2_free_handle(handle->h_rsv_handle);
                jbd2_free_handle(handle);
                return ERR_PTR(err);
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, nblocks);

        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/**
 * jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose of the reserved one. A reserved handle
 * has to be converted to a normal handle using jbd2_journal_start_reserved()
 * before it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
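
/*
 * Example (illustrative sketch, assuming journal and bh are in scope):
 * the typical caller-side lifecycle of a handle that modifies one
 * already-read metadata buffer:
 *
 *        handle_t *handle = jbd2_journal_start(journal, 1);
 *        int err;
 *
 *        if (IS_ERR(handle))
 *                return PTR_ERR(handle);
 *        err = jbd2_journal_get_write_access(handle, bh);
 *        if (!err) {
 *                ...        (modify bh->b_data)
 *                err = jbd2_journal_dirty_metadata(handle, bh);
 *        }
 *        jbd2_journal_stop(handle);
 *        return err;
 */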

static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
        journal_t *journal = handle->h_journal;

        WARN_ON(!handle->h_reserved);
        sub_reserved_credits(journal, handle->h_total_credits);
        if (t)
                atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}

void jbd2_journal_free_reserved(handle_t *handle)
{
        journal_t *journal = handle->h_journal;

        /* Get j_state_lock to pin running transaction if it exists */
        read_lock(&journal->j_state_lock);
        __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
        read_unlock(&journal->j_state_lock);
        jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start a handle that has been previously reserved (by passing
 * rsv_blocks > 0 to jbd2__journal_start()). This attaches @handle to the
 * running transaction (or creates one if there's no transaction running).
 * Unlike jbd2_journal_start() this function cannot block on journal
 * commit, checkpointing, or similar stuff. It can block on memory
 * allocation or frozen journal though.
 *
 * Return 0 on success, non-zero on error - the handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
                                unsigned int line_no)
{
        journal_t *journal = handle->h_journal;
        int ret = -EIO;

        if (WARN_ON(!handle->h_reserved)) {
                /* Someone passed in normal handle? Just stop it. */
                jbd2_journal_stop(handle);
                return ret;
        }
        /*
         * Usefulness of mixing of reserved and unreserved handles is
         * questionable. So far nobody seems to need it so just error out.
         */
        if (WARN_ON(current->journal_info)) {
                jbd2_journal_free_reserved(handle);
                return ret;
        }

        handle->h_journal = NULL;
        /*
         * GFP_NOFS is here because callers are likely from writeback or
         * similarly constrained call sites
         */
        ret = start_this_handle(journal, handle, GFP_NOFS);
        if (ret < 0) {
                handle->h_journal = journal;
                jbd2_journal_free_reserved(handle);
                return ret;
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, handle->h_total_credits);
        return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
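
/*
 * Example (illustrative sketch): reserving a handle up front and starting
 * it later, e.g. from a context that must not block on transaction commit:
 *
 *        handle = jbd2__journal_start(journal, nblocks, rsv_blocks, 0,
 *                                     GFP_NOFS, 0, 0);
 *        if (IS_ERR(handle))
 *                return PTR_ERR(handle);
 *        rsv = handle->h_rsv_handle;
 *        handle->h_rsv_handle = NULL;  (keep rsv across jbd2_journal_stop)
 *        jbd2_journal_stop(handle);
 *        ...
 *        err = jbd2_journal_start_reserved(rsv, 0, 0);
 */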

/**
 * jbd2_journal_extend() - extend buffer credits.
 * @handle: handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int result;
        int wanted;

        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        nblocks += DIV_ROUND_UP(
                        handle->h_revoke_credits_requested + revoke_records,
                        journal->j_revoke_records_per_block) -
                   DIV_ROUND_UP(
                        handle->h_revoke_credits_requested,
                        journal->j_revoke_records_per_block);
        spin_lock(&transaction->t_handle_lock);
        wanted = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                goto unlock;
        }

        trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
                                 transaction->t_tid,
                                 handle->h_type, handle->h_line_no,
                                 handle->h_total_credits,
                                 nblocks);

        handle->h_total_credits += nblocks;
        handle->h_requested_credits += nblocks;
        handle->h_revoke_credits += revoke_records;
        handle->h_revoke_credits_requested += revoke_records;
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        read_unlock(&journal->j_state_lock);
        return result;
}

static void stop_this_handle(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int revokes;

        J_ASSERT(journal_current_handle() == handle);
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        current->journal_info = NULL;
        /*
         * Subtract necessary revoke descriptor blocks from handle credits. We
         * take care to account only for revoke descriptor blocks the
         * transaction will really need as large sequences of transactions with
         * small numbers of revokes are relatively common.
         */
        revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
        if (revokes) {
                int t_revokes, revoke_descriptors;
                int rr_per_blk = journal->j_revoke_records_per_block;

                WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
                                > handle->h_total_credits);
                t_revokes = atomic_add_return(revokes,
                                &transaction->t_outstanding_revokes);
                revoke_descriptors =
                        DIV_ROUND_UP(t_revokes, rr_per_blk) -
                        DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
                handle->h_total_credits -= revoke_descriptors;
        }
        atomic_sub(handle->h_total_credits,
                   &transaction->t_outstanding_credits);
        if (handle->h_rsv_handle)
                __jbd2_journal_unreserve_handle(handle->h_rsv_handle,
                                                transaction);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);

        rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
        /*
         * Scope of the GFP_NOFS context is over here and so we can restore the
         * original alloc context.
         */
        memalloc_nofs_restore(handle->saved_alloc_context);
}

/**
 * jbd2__journal_restart() - restart a handle.
 * @handle: handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve the reserved handle if there's any attached to the
 * passed in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
                          gfp_t gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        tid_t tid;
        int need_to_start;
        int ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;
        journal = transaction->t_journal;
        tid = transaction->t_tid;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        jbd_debug(2, "restarting handle %p\n", handle);
        stop_this_handle(handle);
        handle->h_transaction = NULL;

        /*
         * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
         * get rid of pointless j_state_lock traffic like this.
         */
        read_lock(&journal->j_state_lock);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        handle->h_total_credits = nblocks +
                DIV_ROUND_UP(revoke_records,
                             journal->j_revoke_records_per_block);
        handle->h_revoke_credits = revoke_records;
        ret = start_this_handle(journal, handle, gfp_mask);
        trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
                                  ret ? 0 : handle->h_transaction->t_tid,
                                  handle->h_type, handle->h_line_no,
                                  handle->h_total_credits);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
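
/*
 * Example (illustrative sketch): the usual pattern for growing a handle
 * mid-operation, falling back to a restart when the running transaction
 * is already full (jbd2_journal_extend() returning > 0):
 *
 *        err = jbd2_journal_extend(handle, nblocks, 0);
 *        if (err > 0)
 *                err = jbd2_journal_restart(handle, nblocks);
 *        if (err < 0)
 *                goto fail;
 */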

/**
 * jbd2_journal_lock_updates() - establish a transaction barrier.
 * @journal: Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        jbd2_might_wait_for_commit(journal);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no reserved handles */
        if (atomic_read(&journal->j_reserved_credits)) {
                write_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_reserved,
                           atomic_read(&journal->j_reserved_credits) == 0);
                write_lock(&journal->j_state_lock);
        }

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates()
         * calls to make sure that we serialise special journal-locked
         * operations too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * jbd2_journal_unlock_updates() - release barrier
 * @journal: Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up_all(&journal->j_wait_transaction_locked);
}
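
/*
 * Example (illustrative sketch): quiescing the journal around an
 * operation that must not race with running handles, e.g. a filesystem
 * freeze:
 *
 *        jbd2_journal_lock_updates(journal);
 *        ...        (journal is now quiescent: no handles are running)
 *        jbd2_journal_unlock_updates(journal);
 */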

static void warn_dirty_buffer(struct buffer_head *bh)
{
        printk(KERN_WARNING
               "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
        struct page *page;
        int offset;
        char *source;
        struct buffer_head *bh = jh2bh(jh);

        J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
        page = bh->b_page;
        offset = offset_in_page(bh->b_data);
        source = kmap_atomic(page);
        /* Fire data frozen trigger just before we copy the data */
        jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
        memcpy(jh->b_frozen_data, source + offset, bh->b_size);
        kunmap_atomic(source);

        /*
         * Now that the frozen data is saved off, we need to store any matching
         * triggers.
         */
        jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                    int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        unsigned long start_lock, time_lock;

        journal = transaction->t_journal;

        jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        start_lock = jiffies;
        lock_buffer(bh);
        spin_lock(&jh->b_state_lock);

        /* If it takes too long to lock the buffer, trace it */
        time_lock = jbd2_time_diff(start_lock, jiffies);
        if (time_lock > HZ/10)
                trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
                        jiffies_to_msecs(time_lock));

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                spin_unlock(&jh->b_state_lock);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If the buffer is not journaled right now, we need to make sure it
         * doesn't get written to disk before the caller actually commits the
         * new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                /*
                 * Make sure all stores to jh (b_modified, b_frozen_data) are
                 * visible before attaching it to the running transaction.
                 * Paired with barrier in jbd2_write_access_granted()
                 */
                smp_wmb();
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
                goto done;
        }
        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                goto attach_next;
        }

        JBUFFER_TRACE(jh, "owned by older transaction");
        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

        /*
         * There is one case we have to be very careful about.  If the
         * committing transaction is currently writing this buffer out to disk
         * and has NOT made a copy-out, then we cannot modify the buffer
         * contents at all right now.  The essence of copy-out is that it is
         * the extra copy, not the primary copy, which gets journaled.  If the
         * primary copy is already going to disk then we cannot do copy-out
         * here.
         */
        if (buffer_shadow(bh)) {
                JBUFFER_TRACE(jh, "on shadow: sleep");
                spin_unlock(&jh->b_state_lock);
                wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
                goto repeat;
        }

        /*
         * Only do the copy if the currently-owning transaction still needs it.
         * If buffer isn't on BJ_Metadata list, the committing transaction is
         * past that stage (here we use the fact that BH_Shadow is set under
         * bh_state lock together with refiling to BJ_Shadow list and at this
         * point we know the buffer doesn't have BH_Shadow set).
         *
         * Subtle point, though: if this is a get_undo_access, then we will be
         * relying on the frozen_data to contain the new value of the
         * committed_data record after the transaction, so we HAVE to force the
         * frozen_data copy in that case.
         */
        if (jh->b_jlist == BJ_Metadata || force_copy) {
                JBUFFER_TRACE(jh, "generate frozen data");
                if (!frozen_buffer) {
                        JBUFFER_TRACE(jh, "allocate memory for buffer");
                        spin_unlock(&jh->b_state_lock);
                        frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS | __GFP_NOFAIL);
                        goto repeat;
                }
                jh->b_frozen_data = frozen_buffer;
                frozen_buffer = NULL;
                jbd2_freeze_jh_data(jh);
        }
attach_next:
        /*
         * Make sure all stores to jh (b_modified, b_frozen_data) are visible
         * before attaching it to the running transaction. Paired with barrier
         * in jbd2_write_access_granted()
         */
        smp_wmb();
        jh->b_next_transaction = transaction;

done:
        spin_unlock(&jh->b_state_lock);

        /*
         * If we are about to journal a buffer, then any revoke pending on it
         * is no longer valid
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
                                      bool undo)
{
        struct journal_head *jh;
        bool ret = false;

        /* Dirty buffers require special handling... */
        if (buffer_dirty(bh))
                return false;

        /*
         * RCU protects us from dereferencing freed pages. So the checks we do
         * are guaranteed not to oops. However the jh slab object can get freed
         * & reallocated while we work with it. So we have to be careful. When
         * we see jh attached to the running transaction, we know it must stay
         * so until the transaction is committed. Thus jh won't be freed and
         * will be attached to the same bh while we run. However it can happen
         * that jh gets freed, reallocated, and attached to the transaction
         * just after we get the pointer to it from bh. So we have to be
         * careful and recheck jh still belongs to our bh before we return
         * success.
         */
        rcu_read_lock();
        if (!buffer_jbd(bh))
                goto out;
        /* This should be bh2jh() but that doesn't work with inline functions */
        jh = READ_ONCE(bh->b_private);
        if (!jh)
                goto out;
        /* For undo access buffer must have data copied */
        if (undo && !jh->b_committed_data)
                goto out;
        if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
            READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
                goto out;
        /*
         * There are two reasons for the barrier here:
         * 1) Make sure to fetch b_bh after we did previous checks so that we
         * detect when jh went through free, realloc, attach to transaction
         * while we were checking. Paired with implicit barrier in that path.
         * 2) So that access to bh done after jbd2_write_access_granted()
         * doesn't get reordered and see inconsistent state of concurrent
         * do_get_write_access().
         */
        smp_mb();
        if (unlikely(jh->b_bh != bh))
                goto out;
        ret = true;
out:
        rcu_read_unlock();
        return ret;
}

/**
 * jbd2_journal_get_write_access() - notify intent to modify a buffer
 *				     for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh: bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh;
        int rc;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, false))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * jbd2_journal_get_create_access() - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        journal = transaction->t_journal;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        spin_lock(&jh->b_state_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                spin_lock(&journal->j_list_lock);
                jh->b_next_transaction = transaction;
                spin_unlock(&journal->j_list_lock);
        }
        spin_unlock(&jh->b_state_lock);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second
         * revoke, which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
out:
        jbd2_journal_put_journal_head(jh);
        return err;
}
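
/*
 * Example (illustrative sketch, assuming sb, blocknr and handle are in
 * scope): journaling a freshly allocated metadata block obtained with
 * sb_getblk(); the buffer stays locked until it is fully populated:
 *
 *        bh = sb_getblk(sb, blocknr);
 *        if (unlikely(!bh))
 *                return -ENOMEM;
 *        lock_buffer(bh);
 *        err = jbd2_journal_get_create_access(handle, bh);
 *        if (!err) {
 *                memset(bh->b_data, 0, bh->b_size);
 *                set_buffer_uptodate(bh);
 *        }
 *        unlock_buffer(bh);
 *        if (!err)
 *                err = jbd2_journal_dirty_metadata(handle, bh);
 */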

/**
 * jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh;
        char *committed_data = NULL;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, true))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data)
                committed_data = jbd2_alloc(jh2bh(jh)->b_size,
                                            GFP_NOFS|__GFP_NOFAIL);

        spin_lock(&jh->b_state_lock);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        spin_unlock(&jh->b_state_lock);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        spin_unlock(&jh->b_state_lock);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
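
/*
 * Example (illustrative sketch, assuming handle and bitmap_bh are in
 * scope): freeing blocks in an allocation bitmap, where the
 * pre-deallocation contents must be preserved until the transaction
 * commits:
 *
 *        err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *        if (err)
 *                return err;
 *        ...        (clear the relevant bits in bitmap_bh->b_data)
 *        err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */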

/**
 * jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

        if (WARN_ON(!jh))
                return;
        jh->b_triggers = type;
        jbd2_journal_put_journal_head(jh);
}
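
/*
 * Example (illustrative sketch; my_frozen_trigger and my_csum_update are
 * hypothetical names): a filesystem that recomputes a checksum just
 * before a buffer is written to the journal could hook t_frozen:
 *
 *        static void my_frozen_trigger(struct jbd2_buffer_trigger_type *type,
 *                                      struct buffer_head *bh,
 *                                      void *mapped_data, size_t size)
 *        {
 *                my_csum_update(mapped_data, size);
 *        }
 *
 *        static struct jbd2_buffer_trigger_type my_triggers = {
 *                .t_frozen = my_frozen_trigger,
 *        };
 *
 *        jbd2_journal_set_triggers(bh, &my_triggers);
 */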

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}

/**
 * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh;
        int ret = 0;

        if (!buffer_jbd(bh))
                return -EUCLEAN;

        /*
         * We don't grab jh reference here since the buffer must be part
         * of the running transaction.
         */
        jh = bh2jh(bh);
        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * This and the following assertions are unreliable since we may see jh
         * in inconsistent state unless we grab bh_state lock. But this is
         * crucial to catch bugs so let's do a reliable check until the
         * lockless handling is fully proven.
         */
        if (data_race(jh->b_transaction != transaction &&
            jh->b_next_transaction != transaction)) {
                spin_lock(&jh->b_state_lock);
                J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_next_transaction == transaction);
                spin_unlock(&jh->b_state_lock);
        }
        if (jh->b_modified == 1) {
                /* If it's in our transaction it must be in BJ_Metadata list. */
                if (data_race(jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata)) {
                        spin_lock(&jh->b_state_lock);
                        if (jh->b_transaction == transaction &&
                            jh->b_jlist != BJ_Metadata)
                                pr_err("JBD2: assertion failure: h_type=%u "
                                       "h_line_no=%u block_no=%llu jlist=%u\n",
                                       handle->h_type, handle->h_line_no,
                                       (unsigned long long) bh->b_blocknr,
                                       jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        spin_unlock(&jh->b_state_lock);
                }
                goto out;
        }

        journal = transaction->t_journal;
        spin_lock(&jh->b_state_lock);

        if (is_handle_aborted(handle)) {
                /*
                 * Check journal aborting with @jh->b_state_lock locked,
                 * since 'jh->b_transaction' could be replaced with
                 * 'jh->b_next_transaction' during old transaction
                 * committing if journal aborted, which may fail
                 * assertion on 'jh->b_frozen_data == NULL'.
                 */
                ret = -EROFS;
                goto out_unlock_bh;
        }

        if (jh->b_modified == 0) {
                /*
                 * This buffer's getting modified and becoming part
                 * of the transaction. This needs to be done
                 * once per transaction -bzzz
                 */
                if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
                jh->b_modified = 1;
                handle->h_total_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
                        printk(KERN_ERR "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_running_transaction,
                               journal->j_running_transaction ?
                               journal->j_running_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(((jh->b_transaction !=
                               journal->j_committing_transaction)) ||
                             (jh->b_next_transaction != transaction))) {
                        printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
                               "bad jh for block %llu: "
                               "transaction (%p, %u), "
                               "jh->b_transaction (%p, %u), "
                               "jh->b_next_transaction (%p, %u), jlist %u\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               transaction, transaction->t_tid,
                               jh->b_transaction,
                               jh->b_transaction ?
                               jh->b_transaction->t_tid : 0,
                               jh->b_next_transaction,
                               jh->b_next_transaction ?
                               jh->b_next_transaction->t_tid : 0,
                               jh->b_jlist);
                        WARN_ON(1);
                        ret = -EINVAL;
                }
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        spin_unlock(&jh->b_state_lock);
out:
        JBUFFER_TRACE(jh, "exit");
        return ret;
}

/**
 * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh: bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer. If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journaled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable. Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	BUFFER_TRACE(bh, "entry");

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh) {
		__bforget(bh);
		return 0;
	}

	spin_lock(&jh->b_state_lock);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto drop;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer is leaving the transaction, so we must drop all
	 * references. -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		spin_lock(&journal->j_list_lock);
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			jbd2_journal_put_journal_head(jh);
		}
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction by
		 * marking the buffer as freed and setting
		 * j_next_transaction to the new transaction, so that not
		 * only does the commit code know it should clear dirty
		 * bits when it is done with the buffer, but also the
		 * buffer can be checkpointed only after the new
		 * transaction commits. */

		set_buffer_freed(bh);

		if (!jh->b_next_transaction) {
			spin_lock(&journal->j_list_lock);
			jh->b_next_transaction = transaction;
			spin_unlock(&journal->j_list_lock);
		} else {
			J_ASSERT(jh->b_next_transaction == transaction);

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	} else {
		/*
		 * Finally, if the buffer does not belong to any
		 * transaction, we can just drop it now if it has no
		 * checkpoint.
		 */
		spin_lock(&journal->j_list_lock);
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "belongs to no transaction");
			spin_unlock(&journal->j_list_lock);
			goto drop;
		}

		/*
		 * Otherwise, if the buffer has been written to disk,
		 * it is safe to remove the checkpoint and drop it.
		 */
		if (!buffer_dirty(bh)) {
			__jbd2_journal_remove_checkpoint(jh);
			spin_unlock(&journal->j_list_lock);
			goto drop;
		}

		/*
		 * The buffer has not yet been written to disk, so we
		 * should attach it to the current transaction so that
		 * it can be checkpointed only after the current
		 * transaction commits.
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		spin_unlock(&journal->j_list_lock);
	}
drop:
	__brelse(bh);
	spin_unlock(&jh->b_state_lock);
	jbd2_journal_put_journal_head(jh);
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_total_credits++;
	}
	return err;
}
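
/*
 * Illustrative sketch (not part of the kernel build): the typical caller
 * pattern for jbd2_journal_forget(). A filesystem that has just freed a
 * metadata block inside a running handle would do something like the
 * following; everything except the jbd2 call is hypothetical.
 *
 *	struct buffer_head *bh = sb_find_get_block(sb, blocknr);
 *
 *	if (bh) {
 *		err = jbd2_journal_forget(handle, bh);
 *		(jbd2_journal_forget() drops the b_count reference that
 *		sb_find_get_block() took, so no brelse() here.)
 *	}
 */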

/**
 * jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here. We just return any remaining
 * buffer credits to the transaction and remove the handle. The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances. In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int err = 0, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		if (is_handle_aborted(handle))
			return -EIO;
		return 0;
	}
	if (!transaction) {
		/*
		 * The handle is already detached from the transaction so
		 * there is nothing to do other than free the handle.
		 */
		memalloc_nofs_restore(handle->saved_alloc_context);
		goto free_and_exit;
	}
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	if (is_handle_aborted(handle))
		err = -EIO;

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				tid, handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_total_credits));

	/*
	 * Implement synchronous transaction batching. If the handle
	 * was synchronous, don't force a commit immediately. Let's
	 * yield and let another thread piggyback onto this
	 * transaction. Keep doing that while new threads continue to
	 * arrive. It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway. Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try to optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time. This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction. We achieve this by measuring how long
	 * it takes to commit a transaction, and comparing it with how
	 * long this transaction has been running; if the run time
	 * < commit time then we sleep for the delta and commit. This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write. We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes. No point in waiting for joiners in that case.
	 *
	 * Setting max_batch_time to 0 disables this completely.
	 */
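	/*
	 * Worked example with made-up numbers: say j_min_batch_time is 0,
	 * j_max_batch_time is 15000 us and the measured average commit
	 * time is 2000000 ns (2 ms). Clamping to the [0, 15 ms] window
	 * leaves commit_time at 2 ms. If this transaction has only been
	 * running for 500000 ns (0.5 ms), then trans_time < commit_time
	 * and we park this task below until roughly "now + 2 ms", giving
	 * other threads a chance to join the transaction before it
	 * commits.
	 */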
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid &&
	    journal->j_max_batch_time) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going! We also want to force a commit if the transaction is too
	 * old now.
	 */
	if (handle->h_sync ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
			  "handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once stop_this_handle() drops t_updates, the transaction could start
	 * committing on us and eventually disappear. So we must not
	 * dereference the transaction pointer again after calling
	 * stop_this_handle().
	 */
	stop_this_handle(handle);

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

free_and_exit:
	if (handle->h_rsv_handle)
		jbd2_free_handle(handle->h_rsv_handle);
	jbd2_free_handle(handle);
	return err;
}
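
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * canonical handle life cycle that ends in jbd2_journal_stop(). A
 * filesystem modifying a single metadata buffer would typically do:
 *
 *	handle = jbd2_journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		modify_buffer_contents(bh);	(hypothetical helper)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	err2 = jbd2_journal_stop(handle);
 *	if (!err)
 *		err = err2;
 */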

/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;

		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
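
/*
 * A small worked example of the list representation used above: with
 * three elements A, B and C filed in that order, *list points at A and
 *
 *	A->b_tnext == B, B->b_tnext == C, C->b_tnext == A
 *	A->b_tprev == C, C->b_tprev == B, B->b_tprev == A
 *
 * so the tail is always found in O(1) as (*list)->b_tprev, which is what
 * __blist_add_buffer() relies on when appending.
 */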

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list. If the caller is holding onto a copy of one of these
 * pointers, it could go bad. Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (transaction && is_journal_aborted(transaction->t_journal))
		clear_buffer_jbddirty(bh);
	else if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove a buffer from all transactions. The caller is responsible for
 * dropping the jh reference that belonged to the transaction.
 *
 * Called with jh->b_state_lock and j_list_lock held.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	J_ASSERT_JH(jh, jh->b_transaction != NULL);
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	jbd2_journal_put_journal_head(jh);
	__brelse(bh);
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jh->b_state_lock.
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}

/**
 * jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: the page to try to free
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat. We aren't protected by the
 * BKL here. We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer. So we need to lock against that. jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this? hmm... Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state. But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction. Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		spin_lock(&jh->b_state_lock);
		__journal_try_to_free_buffer(journal, bh);
		spin_unlock(&jh->b_state_lock);
		jbd2_journal_put_journal_head(jh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);
busy:
	return ret;
}
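
/*
 * Illustrative sketch (assumed caller, not part of this file): the kind
 * of ->releasepage() implementation that funnels into
 * jbd2_journal_try_to_free_buffers(); the names around the jbd2 call are
 * hypothetical.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t wait)
 *	{
 *		journal_t *journal = myfs_get_journal(page);
 *
 *		if (!journal)
 *			return try_to_free_buffers(page);
 *		return jbd2_journal_try_to_free_buffers(journal, page);
 *	}
 */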

/*
 * This buffer is no longer needed. If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits. If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jh->b_state_lock.
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
		jbd2_journal_put_journal_head(jh);
	}
	return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky. It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage
 * on the data.
 *
 * This is done in ext3 by defining an ext3_setattr method which
 * updates i_size before truncate gets going. By maintaining this
 * invariant, we can be sure that it is safe to throw away any buffers
 * attached to the current transaction: once the transaction commits,
 * we know that the data will not be needed.
 *
 * Note however that we can *not* throw away data belonging to the
 * previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 * The bitmap committed_data images guarantee this: any block which is
 * allocated in one transaction and removed in the next will be marked
 * as in-use in the committed_data bitmap, so cannot be reused until
 * the next transaction to delete the block commits. This means that
 * leaving committing buffers dirty is quite safe: the disk blocks
 * cannot be reallocated to a different file and so buffer aliasing is
 * not possible.
 *
 * The above applies mainly to ordered data mode. In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode. --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here. Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
				int partial_page)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_unlocked;

	/* OK, we have a data buffer in journaled mode */
	write_lock(&journal->j_state_lock);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);

	/*
	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding the inode to the orphan list (let's call it T)
	 * is committed. Otherwise, if the transaction changing the buffer
	 * were cleaned from the journal before T is committed, a crash
	 * would cause the correct contents of the buffer to be lost. On
	 * the other hand, we have to clear the buffer dirty bit at the
	 * latest at the moment when the transaction marking the buffer as
	 * freed in the filesystem structures is committed, because from
	 * that moment on the block can be reallocated and used by a
	 * different page. Since the block hasn't been freed yet but the
	 * inode has already been added to the orphan list, it is safe for
	 * us to add the buffer to the BJ_Forget list of the newest
	 * transaction.
	 *
	 * Also we have to clear the buffer_mapped flag of a truncated buffer
	 * because the buffer_head may be attached to the page straddling
	 * i_size (can happen only when blocksize < pagesize) and thus the
	 * buffer_head can be reused when the file is extended again. So we end
	 * up keeping around invalidated buffers attached to transactions'
	 * BJ_Forget list just to stop checkpointing code from cleaning up
	 * the transaction this buffer was modified in.
	 */
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction. If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it. We can drop it now */
			__jbd2_journal_remove_checkpoint(jh);
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			may_free = __dispose_buffer(jh,
					journal->j_running_transaction);
			goto zap_buffer;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit. We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				may_free = __dispose_buffer(jh,
					journal->j_committing_transaction);
				goto zap_buffer;
			} else {
				/* The orphan record's transaction has
				 * committed. We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				__jbd2_journal_remove_checkpoint(jh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * The buffer is committing, so we simply cannot touch
		 * it. If the page is straddling i_size we have to wait
		 * for commit and try again.
		 */
		if (partial_page) {
			spin_unlock(&journal->j_list_lock);
			spin_unlock(&jh->b_state_lock);
			write_unlock(&journal->j_state_lock);
			jbd2_journal_put_journal_head(jh);
			return -EBUSY;
		}
		/*
		 * OK, the buffer won't be reachable after truncate. We just
		 * clear b_modified to not confuse transaction credit
		 * accounting, set j_next_transaction to the running
		 * transaction (if there is one) and mark the buffer as freed
		 * so that the commit code knows it should clear dirty bits
		 * when it is done with the buffer.
		 */
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jh->b_modified = 0;
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&jh->b_state_lock);
		write_unlock(&journal->j_state_lock);
		jbd2_journal_put_journal_head(jh);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/*
	 * This is tricky. Although the buffer is truncated, it may be reused
	 * if blocksize < pagesize and it is attached to the page straddling
	 * EOF. Since the buffer might have been added to the BJ_Forget list
	 * of the running transaction, journal_get_write_access() won't clear
	 * b_modified and credit accounting gets confused. So clear b_modified
	 * here.
	 */
	jh->b_modified = 0;
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	write_unlock(&journal->j_state_lock);
	jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}

/**
 * jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page: page to flush
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. The caller then has to wait for the
 * current commit and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
				struct page *page,
				unsigned int offset,
				unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;
	int partial_page = (offset || length < PAGE_SIZE);
	int may_free = 1;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return 0;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;

		next = bh->b_this_page;
		if (next_off > stop)
			return 0;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			ret = journal_unmap_buffer(journal, bh, partial_page);
			unlock_buffer(bh);
			if (ret < 0)
				return ret;
			may_free &= ret;
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!partial_page) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
	return 0;
}
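
/*
 * Illustrative sketch (assumed caller, not part of this file): an
 * ->invalidatepage() address_space operation would wrap the call above
 * roughly like this, waiting for the committing transaction and retrying
 * when -EBUSY is returned; the myfs_* names are hypothetical.
 *
 *	static void myfs_invalidatepage(struct page *page,
 *					unsigned int offset,
 *					unsigned int length)
 *	{
 *		journal_t *journal = myfs_get_journal(page);
 *
 *		while (jbd2_journal_invalidatepage(journal, page,
 *						   offset, length) == -EBUSY)
 *			myfs_wait_for_commit(journal);
 *	}
 */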

/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
			jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track the dirty bit in
		 * buffer_jbddirty instead of buffer_dirty. We should not see
		 * a dirty bit set here because we clear it in
		 * do_get_write_access, but e.g. tune2fs can modify the sb and
		 * set the dirty bit at any time, so we try to gracefully
		 * handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

void jbd2_journal_file_buffer(struct journal_head *jh,
			      transaction_t *transaction, int jlist)
{
	spin_lock(&jh->b_state_lock);
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock.
 * Called under jh->b_state_lock.
 *
 * When this function returns true, there's no next transaction to refile to
 * and the caller has to drop the jh reference through
 * jbd2_journal_put_journal_head().
 */
bool __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return true;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);

	/*
	 * b_transaction must be set, otherwise the new b_transaction won't
	 * be holding the jh reference.
	 */
	J_ASSERT_JH(jh, jh->b_transaction != NULL);

	/*
	 * We set b_transaction here because b_next_transaction will inherit
	 * our jh reference and thus __jbd2_journal_file_buffer() must not
	 * take a new one.
	 */
	WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
	WRITE_ONCE(jh->b_next_transaction, NULL);
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
	return false;
}

/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	bool drop;

	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	drop = __jbd2_journal_refile_buffer(jh);
	spin_unlock(&jh->b_state_lock);
	spin_unlock(&journal->j_list_lock);
	if (drop)
		jbd2_journal_put_journal_head(jh);
}

/*
 * File the inode in the inode list of the handle's transaction.
 */
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
		unsigned long flags, loff_t start_byte, loff_t end_byte)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	spin_lock(&journal->j_list_lock);
	jinode->i_flags |= flags;

	if (jinode->i_dirty_end) {
		jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
		jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
	} else {
		jinode->i_dirty_start = start_byte;
		jinode->i_dirty_end = end_byte;
	}

	/* Is the inode already attached where we need it? */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/*
	 * We only ever set this variable to 1, so the test is safe. Since
	 * t_need_data_flush is likely to be set, we do the test to save some
	 * cacheline bouncing.
	 */
	if (!transaction->t_need_data_flush)
		transaction->t_need_data_flush = 1;
	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}

int jbd2_journal_inode_ranged_write(handle_t *handle,
		struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode,
			JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
			start_byte + length - 1);
}

int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
		loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
			start_byte, start_byte + length - 1);
}
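
/*
 * Illustrative sketch (assumed caller, not part of this file): in
 * ordered mode a filesystem registers freshly allocated data ranges with
 * the running handle so that commit flushes them before the metadata,
 * e.g. after mapping @len bytes of new blocks at file offset @pos:
 *
 *	err = jbd2_journal_inode_ranged_write(handle, ei_jinode, pos, len);
 *
 * while a plain overwrite of already-allocated blocks only needs commit
 * to wait for the pending writeback:
 *
 *	err = jbd2_journal_inode_ranged_wait(handle, ei_jinode, pos, len);
 *
 * (ei_jinode stands for the filesystem's embedded struct jbd2_inode.)
 */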

/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way. If a transaction writing data block A is
 * committing, we cannot discard the data via truncate until the data
 * has been written. Otherwise, if we crashed after the transaction
 * with the write had committed but before the transaction with the
 * truncate had committed, we could see stale data in block A. This
 * function is a helper to solve this problem. It starts writeout of
 * the truncated part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when the inode is journaled
 * in ordered mode before truncation happens and after the inode has
 * been placed on the orphan list with the new inode size. The second
 * condition avoids the race where someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for the truncate is started (and furthermore it
 * allows us to optimize the case where the addition to the orphan list
 * happens in the same transaction as the write --- we don't have to
 * write any data in such a case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
					struct jbd2_inode *jinode,
					loff_t new_size)
{
	transaction_t *inode_trans, *commit_trans;
	int ret = 0;

	/* This is a quick check to avoid locking if not necessary */
	if (!jinode->i_transaction)
		goto out;
	/* Locks are here just to force reading of recent values; it is
	 * enough that the transaction was not committing before we started
	 * a transaction adding the inode to the orphan list */
	read_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	read_unlock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	inode_trans = jinode->i_transaction;
	spin_unlock(&journal->j_list_lock);
	if (inode_trans == commit_trans) {
		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);
	}
out:
	return ret;
}
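
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * ordering required by the comment above looks roughly like this in a
 * filesystem's truncate path; only the jbd2 call is real, the rest is
 * hypothetical.
 *
 *	add_inode_to_orphan_list(handle, inode);
 *	err = jbd2_journal_begin_ordered_truncate(journal, &ei->jinode,
 *						  new_size);
 *	if (!err)
 *		err = do_the_truncate(inode, new_size);
 */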