// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_error.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing to be logged when the
 * transaction is committed.  The dquot must already be associated with
 * the given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary
 * case, this gets called via xfs_trans_commit(), after the transaction
 * is already dirty.  However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim().  Hence the
 * XFS_TRANS_DIRTY flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}
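
/*
 * Illustrative sketch (not part of the logic above): a direct caller such
 * as xfs_qm_scall_setqlim() follows roughly this pattern, given an already
 * locked dquot "dqp" and an allocated transaction "tp":
 *
 *	xfs_trans_dqjoin(tp, dqp);
 *	// ... modify fields in dqp->q_core, e.g. the quota limits ...
 *	xfs_trans_log_dquot(tp, dqp);
 *	// the transaction commit then writes the dquot log item to the log
 */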

/*
 * Carry forward whatever is left of the quota block reservation to
 * the brand new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}
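
/*
 * Worked example of the carry-forward above (hypothetical numbers): if the
 * old transaction reserved 10 blocks (qt_blk_res == 10) and has so far
 * consumed 3 of them (qt_bcount_delta == 3), the old transaction keeps a
 * reservation of 3 to cover what it used, and the remaining 7 become the
 * new transaction's qt_blk_res.
 */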

/*
 * Wrapper around xfs_trans_mod_dquot() that applies the same change to
 * each of the inode's attached user, group and project dquots.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}
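
/*
 * Illustrative sketch (the variable "count" is hypothetical): a caller
 * that just allocated "count" data blocks to inode "ip" would account
 * them against all of the inode's attached dquots with a single call:
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, count);
 */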

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	if (XFS_QM_ISUDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
	else if (XFS_QM_ISGDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
	else if (XFS_QM_ISPDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
	else
		return NULL;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {
	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/*
	 * inode reservation
	 */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/*
	 * disk blocks used.
	 */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/*
	 * Inode Count
	 */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/*
	 * rtblk reservation
	 */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/*
	 * rtblk count
	 */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
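
/*
 * Illustrative sketch of the reservation/usage split (hypothetical
 * numbers): a caller that reserved 8 blocks up front and then allocated 5
 * of them would end up with qt_blk_res == 8 and qt_bcount_delta == 5:
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);
 *
 * The 3 unused blocks are given back when the deltas are applied at
 * commit time.
 */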

/*
 * Given an array of dqtrx structures, lock all the dquots associated and
 * join them to the transaction, provided they have been modified.  We know
 * that at most two dquots of any one type - usr, grp or prj - can be
 * involved in a transaction, so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquots to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquots are still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	struct xfs_disk_dquot	*d;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * Sometimes we intentionally skip making a block
			 * quota reservation, to be fair to users (when the
			 * amount is small).  Delayed allocations, on the
			 * other hand, do make reservations, but outside of
			 * a transaction, so we have no idea how much was
			 * really reserved.
			 * Here we've accumulated both the delayed allocation
			 * blocks and the non-delayed blocks.  The assumption
			 * is that the delayed ones are always reserved
			 * (outside of a transaction), while the others may
			 * or may not have quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount,
					     (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount,
					     (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount,
					     (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				uint64_t	blk_res_used = 0;

				if (qtrx->qt_bcount_delta > 0)
					blk_res_used = qtrx->qt_bcount_delta;

				if (qtrx->qt_blk_res != blk_res_used) {
					if (qtrx->qt_blk_res > blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either
				 * inside a transaction or outside one (in a
				 * delayed allocation). Also, this isn't
				 * always a negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
						(xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}
	}
}
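
/*
 * Worked example of the reservation trimming above (hypothetical numbers):
 * if the transaction reserved 10 blocks (qt_blk_res == 10) but only
 * allocated 4 of them (qt_bcount_delta == 4), the 6 unused blocks are
 * released here by subtracting them from dqp->q_res_bcount.
 */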

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}
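
/*
 * Worked example (hypothetical numbers): an aborted transaction that had
 * reserved 8 blocks (qt_blk_res == 8) simply gives all 8 back by
 * subtracting them from q_res_bcount; any accumulated deltas are discarded
 * because they were never applied to the incore dquot.
 */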

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	if (dqp->dq_flags & XFS_DQ_PROJ)
		qtype = PRJQUOTA;
	else if (dqp->dq_flags & XFS_DQ_USER)
		qtype = USRQUOTA;
	else
		qtype = GRPQUOTA;

	quota_send_warning(make_kqid(&init_user_ns, qtype,
				     be32_to_cpu(dqp->q_core.d_id)),
			   mp->m_super->s_dev, type);
}

/*
 * This reserves disk blocks and inodes against a dquot.  The dquot is
 * locked (and unlocked again) here.  Flags indicate whether the block
 * reservation is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;
	time_t			timer;
	xfs_qwarncnt_t		warns;
	xfs_qwarncnt_t		warnlimit;
	xfs_qcnt_t		total_count;
	xfs_qcnt_t		*resbcountp;
	xfs_quotainfo_t		*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(dqp, q);

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = defq->bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = defq->bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = defq->rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = defq->rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			total_count = dqp->q_res_icount + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = defq->ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = defq->isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * Note the reservation amount in the trans structure too, so that
	 * the transaction knows how much was reserved by it against this
	 * particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}

	if (XFS_IS_CORRUPT(mp,
			   dqp->q_res_bcount < be64_to_cpu(dqp->q_core.d_bcount)) ||
	    XFS_IS_CORRUPT(mp,
			   dqp->q_res_rtbcount < be64_to_cpu(dqp->q_core.d_rtbcount)) ||
	    XFS_IS_CORRUPT(mp,
			   dqp->q_res_icount < be64_to_cpu(dqp->q_core.d_icount)))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return -EFSCORRUPTED;
}
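
/*
 * Worked example of the enforcement check above (hypothetical numbers):
 * with q_res_bcount == 90, a hard limit of 100 and nblks == 15,
 * total_count is 105 > 100, so the reservation fails with -EDQUOT
 * (or -ENOSPC if XFS_QMOPT_ENOSPC was passed in).
 */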

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
					 (flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos,
					 (flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
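
/*
 * Illustrative sketch of the all-or-nothing behavior above: if the user
 * and group reservations succeed but the project reservation fails, the
 * unwind_grp/unwind_usr paths re-run xfs_trans_dqresv() with negative
 * counts and XFS_QMOPT_FORCE_RES set, so both earlier reservations are
 * backed out and the caller sees a single error with no partial state.
 */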

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	if (XFS_IS_PQUOTA_ON(mp))
		flags |= XFS_QMOPT_ENOSPC;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}
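
/*
 * Illustrative sketch (the variable "count" is hypothetical): a caller
 * about to map "count" new blocks into a locked inode would reserve them
 * against all of the inode's dquots like so:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, count, 0,
 *					      XFS_TRANS_DQ_RES_BLKS);
 *	if (error)
 *		goto out_trans_cancel;
 */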

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}

/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}