// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/* Upgrade the dquot to bigtime format if possible. */
	if (dqp->q_id != 0 &&
	    xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
	}
}

/*
 * Wrap around mod_dquot to account for the user, group and project
 * quotas attached to an inode.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

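/*
 * Return the dqtrx slot in the transaction that tracks changes for this
 * dquot.  Each quota type (user, group, project) has its own small array of
 * slots; return either the slot already assigned to this dquot or the first
 * free one, or NULL if the type is unrecognized or the array is full.
 */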
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	if (delta) {
		trace_xfs_trans_mod_dquot_before(qtrx);
		trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
	}

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	if (delta)
		trace_xfs_trans_mod_dquot_after(qtrx);

	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */

			/*
			 * The issue here is that sometimes we don't make a
			 * blkquota reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0) {
				trace_xfs_trans_apply_dquot_deltas_before(dqp);
				trace_xfs_trans_apply_dquot_deltas(qtrx);
			}

#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0)
				trace_xfs_trans_apply_dquot_deltas_after(dqp);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
	}
}

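/*
 * Map the XFS dquot type onto the VFS quota type and forward the given
 * QUOTA_NL_* warning to the generic quota netlink interface so that
 * userspace can be notified.
 */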
STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3, since it's userspace ABI now.  We never
 * decrease the quota reservation here, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);

	*fatal = false;
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		if ((res->timer != 0 && now > res->timer) ||
		    (res->warnings != 0 && res->warnings >= qlim->warn)) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * Note the reservation amount in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}

	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return -EFSCORRUPTED;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * Failed project quota reservations return -ENOSPC instead of -EDQUOT
 * (see xfs_trans_dqresv()).
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}


/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

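/* Allocate the quota delta tracking structure attached to this transaction. */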
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
					 GFP_KERNEL | __GFP_NOFAIL);
}

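/* Free the quota delta tracking structure, if this transaction has one. */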
void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}