/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

/*
 * returns the number of iovecs needed to log the given dquot item.
 */
/* ARGSUSED */
STATIC uint
xfs_qm_dquot_logitem_size(
        xfs_dq_logitem_t        *logitem)
{
        /*
         * we need only two iovecs, one for the format, one for the real thing
         */
        return (2);
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
        xfs_dq_logitem_t        *logitem,
        xfs_log_iovec_t         *logvec)
{
        ASSERT(logitem);
        ASSERT(logitem->qli_dquot);

        logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
        logvec->i_len = sizeof(xfs_dq_logformat_t);
        XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
        logvec++;
        logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
        logvec->i_len = sizeof(xfs_disk_dquot_t);
        XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);

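        /*
         * Both regions are accounted for in the log item descriptor;
         * record the same region count in the logged format structure.
         */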
        ASSERT(2 == logitem->qli_item.li_desc->lid_size);
        logitem->qli_format.qlf_size = 2;

}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
        xfs_dq_logitem_t        *logitem)
{
        xfs_dquot_t     *dqp = logitem->qli_dquot;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
        xfs_dq_logitem_t        *logitem,
        int                     stale)
{
        xfs_dquot_t     *dqp = logitem->qli_dquot;

        ASSERT(atomic_read(&dqp->q_pincount) > 0);
        if (atomic_dec_and_test(&dqp->q_pincount))
                wake_up(&dqp->q_pinwait);
}

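/*
 * Unpin variant that is also handed the transaction the item is being
 * removed from.  Dquots have no transaction-specific unpin work to do,
 * so simply drop the pin count.
 */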
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin_remove(
        xfs_dq_logitem_t        *logitem,
        xfs_trans_t             *tp)
{
        xfs_qm_dquot_logitem_unpin(logitem, 0);
}

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
        xfs_dq_logitem_t        *logitem)
{
        xfs_dquot_t     *dqp;
        int             error;

        dqp = logitem->qli_dquot;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        /*
         * Since we were able to lock the dquot's flush lock and
         * we found it on the AIL, the dquot must be dirty.  This
         * is because the dquot is removed from the AIL while still
         * holding the flush lock in xfs_dqflush_done().  Thus, if
         * we found it in the AIL and were able to obtain the flush
         * lock without sleeping, then there must not have been
         * anyone in the process of flushing the dquot.
         */
        error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
        if (error)
                xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
                        "xfs_qm_dquot_logitem_push: push error %d on dqp %p",
                        error, dqp);
        xfs_dqunlock(dqp);
}

/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
        xfs_dq_logitem_t        *l,
        xfs_lsn_t               lsn)
{
        /*
         * We always re-log the entire dquot when it becomes dirty,
         * so, the latest copy _is_ the only one that matters.
         */
        return (lsn);
}


/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
        xfs_dquot_t     *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        if (atomic_read(&dqp->q_pincount) == 0)
                return;

        /*
         * Give the log a push so we don't wait here too long.
         */
        xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
        wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't.  So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as
 * soon as possible.
 *
 * We must not be holding the AIL lock at this point.  Calling incore() to
 * search the buffer cache can be a time consuming thing, and the AIL lock
 * is a spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
        xfs_dq_logitem_t        *qip)
{
        xfs_dquot_t     *dqp;
        xfs_mount_t     *mp;
        xfs_buf_t       *bp;
        uint            dopush;

        dqp = qip->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /*
         * The qli_pushbuf_flag keeps others from
         * trying to duplicate our effort.
         */
        ASSERT(qip->qli_pushbuf_flag != 0);
        ASSERT(qip->qli_push_owner == current_pid());

        /*
         * If the flush lock isn't held anymore, chances are that the
         * dquot flush completed and the dquot was taken off the AIL.
         * So, just get out.
         */
        if (completion_done(&dqp->q_flush) ||
            ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
                qip->qli_pushbuf_flag = 0;
                xfs_dqunlock(dqp);
                return;
        }
        mp = dqp->q_mount;
        bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
                        XFS_QI_DQCHUNKLEN(mp), XFS_INCORE_TRYLOCK);
        if (bp != NULL) {
                if (XFS_BUF_ISDELAYWRITE(bp)) {
                        dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
                                  !completion_done(&dqp->q_flush));
                        qip->qli_pushbuf_flag = 0;
                        xfs_dqunlock(dqp);

                        if (XFS_BUF_ISPINNED(bp)) {
                                xfs_log_force(mp, (xfs_lsn_t)0,
                                              XFS_LOG_FORCE);
                        }
                        if (dopush) {
                                int error;
#ifdef XFSRACEDEBUG
                                delay_for_intr();
                                delay(300);
#endif
                                error = xfs_bawrite(mp, bp);
                                if (error)
                                        xfs_fs_cmn_err(CE_WARN, mp,
        "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
                                                error, qip, bp);
                        } else {
                                xfs_buf_relse(bp);
                        }
                } else {
                        qip->qli_pushbuf_flag = 0;
                        xfs_dqunlock(dqp);
                        xfs_buf_relse(bp);
                }
                return;
        }

        qip->qli_pushbuf_flag = 0;
        xfs_dqunlock(dqp);
}


/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
        xfs_dq_logitem_t        *qip)
{
        xfs_dquot_t     *dqp;
        uint            retval;

        dqp = qip->qli_dquot;
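        /* A pinned dquot cannot be pushed until its log I/O completes. */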
        if (atomic_read(&dqp->q_pincount) > 0)
                return (XFS_ITEM_PINNED);

        if (!xfs_qm_dqlock_nowait(dqp))
                return (XFS_ITEM_LOCKED);

        retval = XFS_ITEM_SUCCESS;
        if (!xfs_dqflock_nowait(dqp)) {
                /*
                 * The dquot is already being flushed.  It may have been
                 * flushed delayed write, however, and we don't want to
                 * get stuck waiting for that to complete.  So, we want to
                 * check to see if we can lock the dquot's buffer without
                 * sleeping.  If we can and it is marked for delayed write,
                 * then we hold it and send it out from the push routine.
                 * We don't want to do that now since we might sleep in the
                 * device strategy routine.  We also don't want to grab the
                 * buffer lock here because we'd like not to call into the
                 * buffer cache while holding the AIL lock.
                 * Make sure to only return PUSHBUF if we set pushbuf_flag
                 * ourselves.  If someone else is doing it then we don't
                 * want to go to the push routine and duplicate their efforts.
                 */
                if (qip->qli_pushbuf_flag == 0) {
                        qip->qli_pushbuf_flag = 1;
                        ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
                        qip->qli_push_owner = current_pid();
#endif
                        /*
                         * The dquot is left locked.
                         */
                        retval = XFS_ITEM_PUSHBUF;
                } else {
                        retval = XFS_ITEM_FLUSHING;
                        xfs_dqunlock_nonotify(dqp);
                }
        }

        ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
        return (retval);
}


/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
        xfs_dq_logitem_t        *ql)
{
        xfs_dquot_t     *dqp;

        ASSERT(ql != NULL);
        dqp = ql->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /*
         * Clear the transaction pointer in the dquot.
         */
        dqp->q_transp = NULL;

        /*
         * dquots are never 'held' from getting unlocked at the end of
         * a transaction.  Their locking and unlocking is hidden inside the
         * transaction layer, within trans_commit.  Hence, no LI_HOLD flag
         * for the logitem.
         */
        xfs_dqunlock(dqp);
}


/*
 * This needs to stamp an lsn into the dquot, I think.
 * rpc's that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_committing(
        xfs_dq_logitem_t        *l,
        xfs_lsn_t               lsn)
{
        return;
}


/*
 * This is the ops vector for dquots
 */
static struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_dquot_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_dquot_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_dquot_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))
                                        xfs_qm_dquot_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_dquot_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
        .iop_pushbuf    = (void(*)(xfs_log_item_t*))
                                        xfs_qm_dquot_logitem_pushbuf,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_dquot_logitem_committing
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
        struct xfs_dquot        *dqp)
{
        xfs_dq_logitem_t        *lp;

        lp = &dqp->q_logitem;

        lp->qli_item.li_type = XFS_LI_DQUOT;
        lp->qli_item.li_ops = &xfs_dquot_item_ops;
        lp->qli_item.li_mountp = dqp->q_mount;
        lp->qli_dquot = dqp;
        lp->qli_format.qlf_type = XFS_LI_DQUOT;
        lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
        lp->qli_format.qlf_blkno = dqp->q_blkno;
        lp->qli_format.qlf_len = 1;
        /*
         * This is just the offset of this dquot within its buffer
         * (which is currently 1 FSB and probably won't change).
         * Hence 32 bits for this offset should be just fine.
         * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
         * here, and recompute it at recovery time.
         */
        lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
}

/*------------------ QUOTAOFF LOG ITEMS -------------------*/

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
{
        return (1);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given quotaoff log item.  We use only 1 iovec, and we point that
 * at the quotaoff_log_format structure embedded in the quotaoff item.
 * All we assert here is that the item carries the quotaoff type.
 */
STATIC void
xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
                           xfs_log_iovec_t *log_vector)
{
        ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);

        log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
        log_vector->i_len = sizeof(xfs_qoff_logitem_t);
        XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
        qf->qql_format.qf_size = 1;
}


/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
{
        return;
}


/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
{
        return;
}

/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
{
        return;
}

/*
 * Quotaoff items have no locking, so just report them as locked
 * so that the AIL leaves them alone.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
{
        return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing to unlock here.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
{
        return;
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
{
        return (lsn);
}

/*
 * There isn't much you can do to push on a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
{
        return;
}


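/*
 * Called once the quotaoff-end item has been committed to the on-disk log.
 * The quotaoff is now permanent, so the paired quotaoff-start item can be
 * removed from the AIL and both items freed.
 */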
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
        xfs_qoff_logitem_t      *qfe,
        xfs_lsn_t               lsn)
{
        xfs_qoff_logitem_t      *qfs;
        struct xfs_ail          *ailp;

        qfs = qfe->qql_start_lip;
        ailp = qfs->qql_item.li_ailp;
        spin_lock(&ailp->xa_lock);
        /*
         * Delete the qoff-start logitem from the AIL.
         * xfs_trans_ail_delete() drops the AIL lock.
         */
        xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
        kmem_free(qfs);
        kmem_free(qfe);
        return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
/* ARGSUSED */
STATIC void
xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
        return;
}

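/*
 * Nothing to do for the quotaoff-end item at transaction commit time either.
 */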
/* ARGSUSED */
STATIC void
xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
        return;
}

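/*
 * This is the ops vector used by quotaoff-end log items.
 */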
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_qoff_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_qoff_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_qoff_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoffend_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
        .iop_pushbuf    = NULL,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoffend_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_qoff_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_qoff_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_qoff_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoff_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
        .iop_pushbuf    = NULL,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
xfs_qoff_logitem_t *
xfs_qm_qoff_logitem_init(
        struct xfs_mount        *mp,
        xfs_qoff_logitem_t      *start,
        uint                    flags)
{
        xfs_qoff_logitem_t      *qf;

        qf = (xfs_qoff_logitem_t *)kmem_zalloc(sizeof(xfs_qoff_logitem_t),
                                               KM_SLEEP);

        qf->qql_item.li_type = XFS_LI_QUOTAOFF;
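        /*
         * A quotaoff-end item is created with a pointer back to its start
         * item and uses the "end" ops vector, whose ->iop_committed pulls
         * the start item off the AIL once the quotaoff is on disk.
         */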
        if (start)
                qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
        else
                qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
        qf->qql_item.li_mountp = mp;
        qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
        qf->qql_format.qf_flags = flags;
        qf->qql_start_lip = start;
        return (qf);
}