// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
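 *
 * A pending item bundles roughly the following state (a sketch; the
 * authoritative definition of struct xfs_defer_pending lives in
 * xfs_defer.h):
 *
 *	struct xfs_defer_pending {
 *		struct list_head	dfp_list;	(pending items)
 *		struct list_head	dfp_work;	(work items)
 *		struct xfs_log_item	*dfp_intent;	(log intent item)
 *		struct xfs_log_item	*dfp_done;	(log done item)
 *		unsigned int		dfp_count;	(# of work items)
 *		enum xfs_defer_ops_type	dfp_type;
 *	};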
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
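 *
 * In the code below, one such roll looks like this (see
 * xfs_defer_finish_noroll() for the real loop):
 *
 *	xfs_defer_create_intents(tp);
 *	list_splice_init(&tp->t_dfops, &dop_pending);
 *	error = xfs_defer_trans_roll(&tp);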
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish work
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.  ->finish_item
 * must take care to leave enough transaction reservation to fit the
 * new log intent item.
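 *
 * In outline, a continuation-aware ->finish_item has this shape (a
 * sketch of the contract described above, not any one implementation
 * from this codebase):
 *
 *	error = do_one_step(tp, item);
 *	if (error == -EAGAIN) {
 *		trim item down to the unfinished work;
 *		set the done item's count to the work already done;
 *		return -EAGAIN;		(the caller relogs and rolls)
 *	}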
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * recur.
 */
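
/*
 * In practice, a caller queues work with xfs_defer_add() and finishes it
 * all with xfs_defer_finish().  A minimal sketch (the extent-free work
 * item and its xefi_list member are illustrative of any deferred op type
 * that embeds a list_head in its work items):
 *
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &item->xefi_list);
 *	error = xfs_defer_finish(&tp);	(may roll tp several times)
 *	if (error)
 *		goto out_cancel;	(which calls xfs_defer_cancel(tp))
 */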

static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
};

/* Create a log intent item for this pending item, if there isn't one yet. */
static void
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	if (!dfp->dfp_intent)
		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
				dfp->dfp_count, sort);
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		xfs_defer_create_intent(tp, dfp, true);
	}
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	trace_xfs_defer_trans_abort(tp, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_pending, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_trans		*tp = *tpp;
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;
	struct xfs_buf			*bplist[XFS_DEFER_OPS_NR_BUFS];
	struct xfs_inode		*iplist[XFS_DEFER_OPS_NR_INODES];
	unsigned int			ordered = 0; /* bitmap */
	int				bpcount = 0, ipcount = 0;
	int				i;
	int				error;

	BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					ordered |= (1U << bpcount);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				bplist[bpcount++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					ili_item);
			if (ili->ili_lock_flags == 0) {
				if (ipcount >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						XFS_ILOG_CORE);
				iplist[ipcount++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	trace_xfs_defer_trans_roll(tp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always gives a new transaction (even
	 * if committing the old one fails!) to hand back to the caller, so we
	 * join the held resources to the new transaction so that we always
	 * return with the held resources joined to @tpp, no matter what
	 * happened.
	 */
	error = xfs_trans_roll(tpp);
	tp = *tpp;

	/* Rejoin the joined inodes. */
	for (i = 0; i < ipcount; i++)
		xfs_trans_ijoin(tp, iplist[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < bpcount; i++) {
		xfs_trans_bjoin(tp, bplist[i]);
		if (ordered & (1U << i))
			xfs_trans_ordered_buf(tp, bplist[i]);
		xfs_trans_bhold(tp, bplist[i]);
	}

	if (error)
		trace_xfs_defer_trans_roll_error(tp, error);
	return error;
}

/*
 * Reset an already used dfops after finish.
 */
static void
xfs_defer_reset(
	struct xfs_trans	*tp)
{
	ASSERT(list_empty(&tp->t_dfops));

	/*
	 * Low mode state transfers across transaction rolls to mirror dfops
	 * lifetime.  Clear it now that dfops is reset.
	 */
	tp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward.  We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log.  Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	void				*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		error = ops->finish_item(tp, li, dfp->dfp_done, &state);
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			xfs_defer_create_intent(tp, dfp, false);
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_free(dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		xfs_defer_create_intents(*tp);
		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out_shutdown;

		/* Possibly relog intent items to keep the log moving. */
		error = xfs_defer_relog(tp, &dop_pending);
		if (error)
			goto out_shutdown;

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}
	xfs_defer_reset(*tp);
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				struct xfs_defer_pending, dfp_list);
		ops = defer_op_types[dfp->dfp_type];
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_NOFS);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state.  This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls.  Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);

	xfs_defer_reset(stp);
}
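
/*
 * A sketch of the intended use, where a caller replaces one permanent
 * transaction with another (e.g. as part of a dup-and-commit roll) and
 * needs the pending dfops to follow the new transaction (new_tp and
 * old_tp are illustrative names):
 *
 *	xfs_defer_move(new_tp, old_tp);
 *	(commit old_tp; keep working in new_tp)
 */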

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * If capture_ip is not NULL, the capture structure will obtain an extra
 * reference to the inode.  A sketch of how capture and continuation fit
 * together during recovery appears at the end of this file.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp,
	struct xfs_inode		*capture_ip)
{
	struct xfs_defer_capture	*dfc;

	if (list_empty(&tp->t_dfops))
		return NULL;

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	xfs_defer_create_intents(tp);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	/*
	 * Grab an extra reference to this inode and attach it to the capture
	 * structure.
	 */
	if (capture_ip) {
		ihold(VFS_I(capture_ip));
		dfc->dfc_capture_ip = capture_ip;
	}

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_release(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
	if (dfc->dfc_capture_ip)
		xfs_irele(dfc->dfc_capture_ip);
	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, the caller must pass in that inode
 * so that the reference can be transferred to the capture structure.  The
 * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling
 * xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct xfs_inode		*capture_ip,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp, capture_ip);
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_release(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_inode		**captured_ipp)
{
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock and join the captured inode to the new transaction. */
	if (dfc->dfc_capture_ip) {
		xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
	}
	*captured_ipp = dfc->dfc_capture_ip;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}
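
/*
 * A sketch of how log recovery strings the capture machinery together
 * (illustrative only; the real callers live in the log recovery code,
 * and transaction allocation details are elided):
 *
 *	(while recovering each log intent item:)
 *	error = xfs_defer_ops_capture_and_commit(tp, ip, &capture_list);
 *
 *	(after all intent items have been recovered:)
 *	list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
 *		list_del_init(&dfc->dfc_list);
 *		(allocate a new permanent transaction tp)
 *		xfs_defer_ops_continue(dfc, tp, &ip);
 *		error = xfs_trans_commit(tp);
 *		(unlock and release ip, if one was captured)
 *	}
 */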