// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

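/* Convert a log item pointer back to its containing RUI log item. */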
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

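/*
 * Free an RUI item, matching how it was allocated in xfs_rui_init(): items
 * with more than XFS_RUI_MAX_FAST_EXTENTS extents come from the heap, the
 * rest from the RUI zone.
 */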
void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_zone_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

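/*
 * Report the number of log iovecs and the space needed to log the given RUI
 * item: a single iovec covering the variable-length format structure.
 */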
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

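/* Convert a log item pointer back to its containing RUD log item. */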
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

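/*
 * Report the number of log iovecs and the space needed to log the given RUD
 * item: a single iovec covering the fixed-size format structure.
 */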
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD format carries no extent array, so
 * there is nothing further to fill in.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_zone_free(xfs_rud_zone, rudp);
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

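/*
 * Allocate an RUD, point it at the RUI it completes, and add it to the
 * transaction.
 */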
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, 0);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

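/*
 * Build an RUI log item covering all of the rmap updates on the list,
 * sorting the updates by AG first if the caller requests it.
 */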
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, RUI_ITEM(intent));
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_rui_log_item		*ruip,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = ruip->rui_item.li_mountp;
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EFSCORRUPTED;
		}
	}

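	/*
	 * The recovered extents all look sane, so allocate a transaction,
	 * get an RUD, and replay each rmap update, logging its completion
	 * in the RUD.
	 */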
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

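	/* Log an RUD to mark the existing RUI finished. */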
	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

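	/* Log a replacement RUI carrying the same extents. */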
	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, 0);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
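	/*
	 * Two references: one dropped when the matching RUD is released (or
	 * the intent is aborted), and one dropped by the log when the RUI is
	 * unpinned.
	 */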
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}