// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */


#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"

STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
					uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
39 	struct xfs_quotainfo	*q = mp->m_quotainfo;
40 	uint			dqtype;
41 	int			error;
42 	uint			inactivate_flags;
43 	xfs_qoff_logitem_t	*qoffstart;
44 
45 	/*
46 	 * No file system can have quotas enabled on disk but not in core.
47 	 * Note that quota utilities (like quotaoff) _expect_
48 	 * errno == -EEXIST here.
49 	 */
50 	if ((mp->m_qflags & flags) == 0)
51 		return -EEXIST;
52 	error = 0;
53 
54 	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
55 
56 	/*
57 	 * We don't want to deal with two quotaoffs messing up each other,
58 	 * so we're going to serialize it. quotaoff isn't exactly a performance
59 	 * critical thing.
60 	 * If quotaoff, then we must be dealing with the root filesystem.
61 	 */
62 	ASSERT(q);
63 	mutex_lock(&q->qi_quotaofflock);
64 
65 	/*
66 	 * If we're just turning off quota enforcement, change mp and go.
67 	 */
68 	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
69 		mp->m_qflags &= ~(flags);
70 
71 		spin_lock(&mp->m_sb_lock);
72 		mp->m_sb.sb_qflags = mp->m_qflags;
73 		spin_unlock(&mp->m_sb_lock);
74 		mutex_unlock(&q->qi_quotaofflock);
75 
76 		/* XXX what to do if error ? Revert back to old vals incore ? */
77 		return xfs_sync_sb(mp, false);
78 	}
79 
80 	dqtype = 0;
81 	inactivate_flags = 0;
82 	/*
83 	 * If accounting is off, we must turn enforcement off, clear the
84 	 * quota 'CHKD' certificate to make it known that we have to
85 	 * do a quotacheck the next time this quota is turned on.
86 	 */
87 	if (flags & XFS_UQUOTA_ACCT) {
88 		dqtype |= XFS_QMOPT_UQUOTA;
89 		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
90 		inactivate_flags |= XFS_UQUOTA_ACTIVE;
91 	}
92 	if (flags & XFS_GQUOTA_ACCT) {
93 		dqtype |= XFS_QMOPT_GQUOTA;
94 		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
95 		inactivate_flags |= XFS_GQUOTA_ACTIVE;
96 	}
97 	if (flags & XFS_PQUOTA_ACCT) {
98 		dqtype |= XFS_QMOPT_PQUOTA;
99 		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
100 		inactivate_flags |= XFS_PQUOTA_ACTIVE;
101 	}
102 
103 	/*
104 	 * Nothing to do?  Don't complain. This happens when we're just
105 	 * turning off quota enforcement.
106 	 */
107 	if ((mp->m_qflags & flags) == 0)
108 		goto out_unlock;
109 
110 	/*
111 	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
112 	 * and synchronously. If we fail to write, we should abort the
113 	 * operation as it cannot be recovered safely if we crash.
114 	 */
115 	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
116 	if (error)
117 		goto out_unlock;
118 
119 	/*
120 	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
121 	 * to take care of the race between dqget and quotaoff. We don't take
122 	 * any special locks to reset these bits. All processes need to check
123 	 * these bits *after* taking inode lock(s) to see if the particular
124 	 * quota type is in the process of being turned off. If *ACTIVE, it is
125 	 * guaranteed that all dquot structures and all quotainode ptrs will all
126 	 * stay valid as long as that inode is kept locked.
127 	 *
128 	 * There is no turning back after this.
129 	 */
130 	mp->m_qflags &= ~inactivate_flags;
131 
132 	/*
133 	 * Give back all the dquot reference(s) held by inodes.
134 	 * Here we go thru every single incore inode in this file system, and
135 	 * do a dqrele on the i_udquot/i_gdquot that it may have.
136 	 * Essentially, as long as somebody has an inode locked, this guarantees
137 	 * that quotas will not be turned off. This is handy because in a
138 	 * transaction once we lock the inode(s) and check for quotaon, we can
139 	 * depend on the quota inodes (and other things) being valid as long as
140 	 * we keep the lock(s).
141 	 */
142 	xfs_qm_dqrele_all_inodes(mp, flags);
143 
144 	/*
145 	 * Next we make the changes in the quota flag in the mount struct.
146 	 * This isn't protected by a particular lock directly, because we
147 	 * don't want to take a mrlock every time we depend on quotas being on.
148 	 */
149 	mp->m_qflags &= ~flags;
150 
151 	/*
152 	 * Go through all the dquots of this file system and purge them,
153 	 * according to what was turned off.
154 	 */
155 	xfs_qm_dqpurge_all(mp, dqtype);
156 
157 	/*
158 	 * Transactions that had started before ACTIVE state bit was cleared
159 	 * could have logged many dquots, so they'd have higher LSNs than
160 	 * the first QUOTAOFF log record does. If we happen to crash when
161 	 * the tail of the log has gone past the QUOTAOFF record, but
162 	 * before the last dquot modification, those dquots __will__
163 	 * recover, and that's not good.
164 	 *
165 	 * So, we have QUOTAOFF start and end logitems; the start
166 	 * logitem won't get overwritten until the end logitem appears...
167 	 */
168 	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
169 	if (error) {
170 		/* We're screwed now. Shutdown is the only option. */
171 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
172 		goto out_unlock;
173 	}
174 
175 	/*
176 	 * If all quotas are completely turned off, close shop.
177 	 */
178 	if (mp->m_qflags == 0) {
179 		mutex_unlock(&q->qi_quotaofflock);
180 		xfs_qm_destroy_quotainfo(mp);
181 		return 0;
182 	}
183 
184 	/*
185 	 * Release our quotainode references if we don't need them anymore.
186 	 */
187 	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
188 		xfs_irele(q->qi_uquotaip);
189 		q->qi_uquotaip = NULL;
190 	}
191 	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
192 		xfs_irele(q->qi_gquotaip);
193 		q->qi_gquotaip = NULL;
194 	}
195 	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
196 		xfs_irele(q->qi_pquotaip);
197 		q->qi_pquotaip = NULL;
198 	}
199 
200 out_unlock:
201 	mutex_unlock(&q->qi_quotaofflock);
202 	return error;
203 }
204 
205 STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	xfs_irele(ip);
	return error;
}

int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_PROJ)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up-to-date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (! XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We hold
	 * a reference to the dquot, so it's safe to do this unlock/lock without
	 * it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

	defq = xfs_get_defquota(dqp, q);
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			defq->bhardlimit = hard;
			defq->bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->rtbhardlimit = hard;
			defq->rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			defq->ihardlimit = hard;
			defq->isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}


STATIC int
xfs_qm_log_quotaoff(
	xfs_mount_t	       *mp,
	xfs_qoff_logitem_t     **qoffstartp,
	uint		       flags)
{
	xfs_trans_t	       *tp;
	int			error;
	xfs_qoff_logitem_t     *qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}

/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
	struct xfs_mount	*mp,
	uint			type,
	const struct xfs_dquot	*dqp,
	struct qc_dqblk		*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    dqp->q_core.d_id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so don't
	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
	xfs_qm_dqput(dqp);
	return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
	if (error)
		return error;

	/* Fill in the ID we actually read from disk */
	*id = be32_to_cpu(dqp->q_core.d_id);

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

	xfs_qm_dqput(dqp);
	return error;
}

STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}


/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount *mp,
	uint		 flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
				    XFS_AGITER_INEW_WAIT);
}