1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
18 #include "xfs_bmap.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_qm.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
26
27 /*
28 * The global quota manager. There is only one of these for the entire
29 * system, _not_ one per file system. XQM keeps track of the overall
30 * quota functionality, including maintaining the freelist and hash
31 * tables of dquots.
32 */
33 STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
34 STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
35
36 STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
37 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
38 /*
39 * We use the batch lookup interface to iterate over the dquots as it
40 * currently is the only interface into the radix tree code that allows
41 * fuzzy lookups instead of exact matches. Holding the lock over multiple
42  * operations is fine as all callers run either during mount/umount
43 * or quotaoff.
44 */
45 #define XFS_DQ_LOOKUP_BATCH 32
46
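/*
 * Walk every dquot of the given quota type and call @execute on each one.
 * If @execute returns -EAGAIN the dquot is skipped and the whole walk is
 * retried after a short delay; -EFSCORRUPTED aborts the walk immediately.
 * Any other error is recorded and returned once the walk completes.
 */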
47 STATIC int
48 xfs_qm_dquot_walk(
49 struct xfs_mount *mp,
50 int type,
51 int (*execute)(struct xfs_dquot *dqp, void *data),
52 void *data)
53 {
54 struct xfs_quotainfo *qi = mp->m_quotainfo;
55 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
56 uint32_t next_index;
57 int last_error = 0;
58 int skipped;
59 int nr_found;
60
61 restart:
62 skipped = 0;
63 next_index = 0;
64 nr_found = 0;
65
66 while (1) {
67 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
68 int error = 0;
69 int i;
70
71 mutex_lock(&qi->qi_tree_lock);
72 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 next_index, XFS_DQ_LOOKUP_BATCH);
74 if (!nr_found) {
75 mutex_unlock(&qi->qi_tree_lock);
76 break;
77 }
78
79 for (i = 0; i < nr_found; i++) {
80 struct xfs_dquot *dqp = batch[i];
81
82 next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
83
84 error = execute(batch[i], data);
85 if (error == -EAGAIN) {
86 skipped++;
87 continue;
88 }
89 if (error && last_error != -EFSCORRUPTED)
90 last_error = error;
91 }
92
93 mutex_unlock(&qi->qi_tree_lock);
94
95 /* bail out if the filesystem is corrupted. */
96 if (last_error == -EFSCORRUPTED) {
97 skipped = 0;
98 break;
99 }
100 /* we're done if id overflows back to zero */
101 if (!next_index)
102 break;
103 }
104
105 if (skipped) {
106 delay(1);
107 goto restart;
108 }
109
110 return last_error;
111 }
112
113
114 /*
115 * Purge a dquot from all tracking data structures and free it.
116 */
117 STATIC int
118 xfs_qm_dqpurge(
119 struct xfs_dquot *dqp,
120 void *data)
121 {
122 struct xfs_mount *mp = dqp->q_mount;
123 struct xfs_quotainfo *qi = mp->m_quotainfo;
124 int error = -EAGAIN;
125
126 xfs_dqlock(dqp);
127 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0)
128 goto out_unlock;
129
130 dqp->dq_flags |= XFS_DQ_FREEING;
131
132 xfs_dqflock(dqp);
133
134 /*
135  * If we are turning this type of quota off, we don't care
136 * about the dirty metadata sitting in this dquot. OTOH, if
137 * we're unmounting, we do care, so we flush it and wait.
138 */
139 if (XFS_DQ_IS_DIRTY(dqp)) {
140 struct xfs_buf *bp = NULL;
141
142 /*
143 * We don't care about getting disk errors here. We need
144 * to purge this dquot anyway, so we go ahead regardless.
145 */
146 error = xfs_qm_dqflush(dqp, &bp);
147 if (!error) {
148 error = xfs_bwrite(bp);
149 xfs_buf_relse(bp);
150 } else if (error == -EAGAIN) {
151 dqp->dq_flags &= ~XFS_DQ_FREEING;
152 goto out_unlock;
153 }
154 xfs_dqflock(dqp);
155 }
156
157 ASSERT(atomic_read(&dqp->q_pincount) == 0);
158 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
159 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
160
161 xfs_dqfunlock(dqp);
162 xfs_dqunlock(dqp);
163
164 radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
165 be32_to_cpu(dqp->q_core.d_id));
166 qi->qi_dquots--;
167
168 /*
169 * We move dquots to the freelist as soon as their reference count
170 * hits zero, so it really should be on the freelist here.
171 */
172 ASSERT(!list_empty(&dqp->q_lru));
173 list_lru_del(&qi->qi_lru, &dqp->q_lru);
174 XFS_STATS_DEC(mp, xs_qm_dquot_unused);
175
176 xfs_qm_dqdestroy(dqp);
177 return 0;
178
179 out_unlock:
180 xfs_dqunlock(dqp);
181 return error;
182 }
183
184 /*
185 * Purge the dquot cache.
186 */
187 void
188 xfs_qm_dqpurge_all(
189 struct xfs_mount *mp,
190 uint flags)
191 {
192 if (flags & XFS_QMOPT_UQUOTA)
193 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
194 if (flags & XFS_QMOPT_GQUOTA)
195 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
196 if (flags & XFS_QMOPT_PQUOTA)
197 xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
198 }
199
200 /*
201 * Just destroy the quotainfo structure.
202 */
203 void
204 xfs_qm_unmount(
205 struct xfs_mount *mp)
206 {
207 if (mp->m_quotainfo) {
208 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
209 xfs_qm_destroy_quotainfo(mp);
210 }
211 }
212
213 /*
214 * Called from the vfsops layer.
215 */
216 void
217 xfs_qm_unmount_quotas(
218 xfs_mount_t *mp)
219 {
220 /*
221 * Release the dquots that root inode, et al might be holding,
222 * before we flush quotas and blow away the quotainfo structure.
223 */
224 ASSERT(mp->m_rootip);
225 xfs_qm_dqdetach(mp->m_rootip);
226 if (mp->m_rbmip)
227 xfs_qm_dqdetach(mp->m_rbmip);
228 if (mp->m_rsumip)
229 xfs_qm_dqdetach(mp->m_rsumip);
230
231 /*
232 * Release the quota inodes.
233 */
234 if (mp->m_quotainfo) {
235 if (mp->m_quotainfo->qi_uquotaip) {
236 xfs_irele(mp->m_quotainfo->qi_uquotaip);
237 mp->m_quotainfo->qi_uquotaip = NULL;
238 }
239 if (mp->m_quotainfo->qi_gquotaip) {
240 xfs_irele(mp->m_quotainfo->qi_gquotaip);
241 mp->m_quotainfo->qi_gquotaip = NULL;
242 }
243 if (mp->m_quotainfo->qi_pquotaip) {
244 xfs_irele(mp->m_quotainfo->qi_pquotaip);
245 mp->m_quotainfo->qi_pquotaip = NULL;
246 }
247 }
248 }
249
250 STATIC int
251 xfs_qm_dqattach_one(
252 struct xfs_inode *ip,
253 xfs_dqid_t id,
254 uint type,
255 bool doalloc,
256 struct xfs_dquot **IO_idqpp)
257 {
258 struct xfs_dquot *dqp;
259 int error;
260
261 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
262 error = 0;
263
264 /*
265 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
266  * or &i_gdquot. This makes the code look a little odd, but it keeps the
267  * logic a lot simpler.
268 */
269 dqp = *IO_idqpp;
270 if (dqp) {
271 trace_xfs_dqattach_found(dqp);
272 return 0;
273 }
274
275 /*
276 * Find the dquot from somewhere. This bumps the reference count of
277 * dquot and returns it locked. This can return ENOENT if dquot didn't
278 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
279 * turned off suddenly.
280 */
281 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
282 if (error)
283 return error;
284
285 trace_xfs_dqattach_get(dqp);
286
287 /*
288 * dqget may have dropped and re-acquired the ilock, but it guarantees
289 * that the dquot returned is the one that should go in the inode.
290 */
291 *IO_idqpp = dqp;
292 xfs_dqunlock(dqp);
293 return 0;
294 }
295
296 static bool
297 xfs_qm_need_dqattach(
298 struct xfs_inode *ip)
299 {
300 struct xfs_mount *mp = ip->i_mount;
301
302 if (!XFS_IS_QUOTA_RUNNING(mp))
303 return false;
304 if (!XFS_IS_QUOTA_ON(mp))
305 return false;
306 if (!XFS_NOT_DQATTACHED(mp, ip))
307 return false;
308 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
309 return false;
310 return true;
311 }
312
313 /*
314 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
315 * into account.
316 * If @doalloc is true, the dquot(s) will be allocated if needed.
317 * Inode may get unlocked and relocked in here, and the caller must deal with
318 * the consequences.
319 */
320 int
321 xfs_qm_dqattach_locked(
322 xfs_inode_t *ip,
323 bool doalloc)
324 {
325 xfs_mount_t *mp = ip->i_mount;
326 int error = 0;
327
328 if (!xfs_qm_need_dqattach(ip))
329 return 0;
330
331 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
332
333 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
334 error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
335 XFS_DQ_USER, doalloc, &ip->i_udquot);
336 if (error)
337 goto done;
338 ASSERT(ip->i_udquot);
339 }
340
341 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
342 error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
343 XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
344 if (error)
345 goto done;
346 ASSERT(ip->i_gdquot);
347 }
348
349 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
350 error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
351 doalloc, &ip->i_pdquot);
352 if (error)
353 goto done;
354 ASSERT(ip->i_pdquot);
355 }
356
357 done:
358 /*
359 * Don't worry about the dquots that we may have attached before any
360 * error - they'll get detached later if it has not already been done.
361 */
362 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
363 return error;
364 }
365
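/*
 * Attach the dquots needed for quota accounting to @ip, taking and dropping
 * the inode's ILOCK around the work.  Thin wrapper around
 * xfs_qm_dqattach_locked() for callers that do not already hold the lock.
 */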
366 int
367 xfs_qm_dqattach(
368 struct xfs_inode *ip)
369 {
370 int error;
371
372 if (!xfs_qm_need_dqattach(ip))
373 return 0;
374
375 xfs_ilock(ip, XFS_ILOCK_EXCL);
376 error = xfs_qm_dqattach_locked(ip, false);
377 xfs_iunlock(ip, XFS_ILOCK_EXCL);
378
379 return error;
380 }
381
382 /*
383 * Release dquots (and their references) if any.
384  * The inode should be locked EXCL except when this is called by
385 * xfs_ireclaim.
386 */
387 void
388 xfs_qm_dqdetach(
389 xfs_inode_t *ip)
390 {
391 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
392 return;
393
394 trace_xfs_dquot_dqdetach(ip);
395
396 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
397 if (ip->i_udquot) {
398 xfs_qm_dqrele(ip->i_udquot);
399 ip->i_udquot = NULL;
400 }
401 if (ip->i_gdquot) {
402 xfs_qm_dqrele(ip->i_gdquot);
403 ip->i_gdquot = NULL;
404 }
405 if (ip->i_pdquot) {
406 xfs_qm_dqrele(ip->i_pdquot);
407 ip->i_pdquot = NULL;
408 }
409 }
410
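/*
 * State shared by one pass of the dquot shrinker: @buffers collects the
 * dquot buffers queued for delayed write, @dispose collects the dquots that
 * were isolated from the LRU and can be freed once the I/O is submitted.
 */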
411 struct xfs_qm_isolate {
412 struct list_head buffers;
413 struct list_head dispose;
414 };
415
416 static enum lru_status
417 xfs_qm_dquot_isolate(
418 struct list_head *item,
419 struct list_lru_one *lru,
420 spinlock_t *lru_lock,
421 void *arg)
422 __releases(lru_lock) __acquires(lru_lock)
423 {
424 struct xfs_dquot *dqp = container_of(item,
425 struct xfs_dquot, q_lru);
426 struct xfs_qm_isolate *isol = arg;
427
428 if (!xfs_dqlock_nowait(dqp))
429 goto out_miss_busy;
430
431 /*
432  * This dquot has acquired a reference in the meantime; remove it from
433 * the freelist and try again.
434 */
435 if (dqp->q_nrefs) {
436 xfs_dqunlock(dqp);
437 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
438
439 trace_xfs_dqreclaim_want(dqp);
440 list_lru_isolate(lru, &dqp->q_lru);
441 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
442 return LRU_REMOVED;
443 }
444
445 /*
446 * If the dquot is dirty, flush it. If it's already being flushed, just
447 * skip it so there is time for the IO to complete before we try to
448 * reclaim it again on the next LRU pass.
449 */
450 if (!xfs_dqflock_nowait(dqp)) {
451 xfs_dqunlock(dqp);
452 goto out_miss_busy;
453 }
454
455 if (XFS_DQ_IS_DIRTY(dqp)) {
456 struct xfs_buf *bp = NULL;
457 int error;
458
459 trace_xfs_dqreclaim_dirty(dqp);
460
461 /* we have to drop the LRU lock to flush the dquot */
462 spin_unlock(lru_lock);
463
464 error = xfs_qm_dqflush(dqp, &bp);
465 if (error)
466 goto out_unlock_dirty;
467
468 xfs_buf_delwri_queue(bp, &isol->buffers);
469 xfs_buf_relse(bp);
470 goto out_unlock_dirty;
471 }
472 xfs_dqfunlock(dqp);
473
474 /*
475 * Prevent lookups now that we are past the point of no return.
476 */
477 dqp->dq_flags |= XFS_DQ_FREEING;
478 xfs_dqunlock(dqp);
479
480 ASSERT(dqp->q_nrefs == 0);
481 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
482 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
483 trace_xfs_dqreclaim_done(dqp);
484 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
485 return LRU_REMOVED;
486
487 out_miss_busy:
488 trace_xfs_dqreclaim_busy(dqp);
489 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
490 return LRU_SKIP;
491
492 out_unlock_dirty:
493 trace_xfs_dqreclaim_busy(dqp);
494 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
495 xfs_dqunlock(dqp);
496 spin_lock(lru_lock);
497 return LRU_RETRY;
498 }
499
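/*
 * Scan callback for the per-mount dquot shrinker.  Walks the dquot LRU
 * isolating clean, unreferenced dquots, writes back any dirty ones it found
 * via a local delwri list, then frees everything on the dispose list.  Only
 * runs in __GFP_FS direct reclaim contexts.
 */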
500 static unsigned long
501 xfs_qm_shrink_scan(
502 struct shrinker *shrink,
503 struct shrink_control *sc)
504 {
505 struct xfs_quotainfo *qi = container_of(shrink,
506 struct xfs_quotainfo, qi_shrinker);
507 struct xfs_qm_isolate isol;
508 unsigned long freed;
509 int error;
510
511 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
512 return 0;
513
514 INIT_LIST_HEAD(&isol.buffers);
515 INIT_LIST_HEAD(&isol.dispose);
516
517 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
518 xfs_qm_dquot_isolate, &isol);
519
520 error = xfs_buf_delwri_submit(&isol.buffers);
521 if (error)
522 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
523
524 while (!list_empty(&isol.dispose)) {
525 struct xfs_dquot *dqp;
526
527 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
528 list_del_init(&dqp->q_lru);
529 xfs_qm_dqfree_one(dqp);
530 }
531
532 return freed;
533 }
534
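/* Count callback for the dquot shrinker: report the size of the dquot LRU. */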
535 static unsigned long
536 xfs_qm_shrink_count(
537 struct shrinker *shrink,
538 struct shrink_control *sc)
539 {
540 struct xfs_quotainfo *qi = container_of(shrink,
541 struct xfs_quotainfo, qi_shrinker);
542
543 return list_lru_shrink_count(&qi->qi_lru, sc);
544 }
545
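/*
 * Read the id-zero dquot of the given type without caching it and copy its
 * limits into the in-core default limits for that quota type.
 */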
546 STATIC void
547 xfs_qm_set_defquota(
548 xfs_mount_t *mp,
549 uint type,
550 xfs_quotainfo_t *qinf)
551 {
552 struct xfs_dquot *dqp;
553 struct xfs_def_quota *defq;
554 struct xfs_disk_dquot *ddqp;
555 int error;
556
557 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
558 if (error)
559 return;
560
561 ddqp = &dqp->q_core;
562 defq = xfs_get_defquota(dqp, qinf);
563
564 /*
565 * Timers and warnings have been already set, let's just set the
566 * default limits for this quota type
567 */
568 defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
569 defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
570 defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
571 defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
572 defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
573 defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
574 xfs_qm_dqdestroy(dqp);
575 }
576
577 /* Initialize quota time limits from the root dquot. */
578 static void
579 xfs_qm_init_timelimits(
580 struct xfs_mount *mp,
581 struct xfs_quotainfo *qinf)
582 {
583 struct xfs_disk_dquot *ddqp;
584 struct xfs_dquot *dqp;
585 uint type;
586 int error;
587
588 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
589 qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
590 qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
591 qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
592 qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
593 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
594
595 /*
596 * We try to get the limits from the superuser's limits fields.
597 * This is quite hacky, but it is standard quota practice.
598 *
599 * Since we may not have done a quotacheck by this point, just read
600 * the dquot without attaching it to any hashtables or lists.
601 *
602 * Timers and warnings are globally set by the first timer found in
603 * user/group/proj quota types, otherwise a default value is used.
604 * This should be split into different fields per quota type.
605 */
606 if (XFS_IS_UQUOTA_RUNNING(mp))
607 type = XFS_DQ_USER;
608 else if (XFS_IS_GQUOTA_RUNNING(mp))
609 type = XFS_DQ_GROUP;
610 else
611 type = XFS_DQ_PROJ;
612 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
613 if (error)
614 return;
615
616 ddqp = &dqp->q_core;
617 /*
618  * The warnings and timers set the grace period given to
619  * a user or group before writing is no longer allowed.
620  * If it is zero, a default is used.
621 */
622 if (ddqp->d_btimer)
623 qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
624 if (ddqp->d_itimer)
625 qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
626 if (ddqp->d_rtbtimer)
627 qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
628 if (ddqp->d_bwarns)
629 qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
630 if (ddqp->d_iwarns)
631 qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
632 if (ddqp->d_rtbwarns)
633 qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
634
635 xfs_qm_dqdestroy(dqp);
636 }
637
638 /*
639 * This initializes all the quota information that's kept in the
640 * mount structure
641 */
642 STATIC int
643 xfs_qm_init_quotainfo(
644 struct xfs_mount *mp)
645 {
646 struct xfs_quotainfo *qinf;
647 int error;
648
649 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
650
651 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0);
652
653 error = list_lru_init(&qinf->qi_lru);
654 if (error)
655 goto out_free_qinf;
656
657 /*
658  * See if quotainodes are set up, and if not, allocate them,
659 * and change the superblock accordingly.
660 */
661 error = xfs_qm_init_quotainos(mp);
662 if (error)
663 goto out_free_lru;
664
665 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
666 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
667 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
668 mutex_init(&qinf->qi_tree_lock);
669
670 /* mutex used to serialize quotaoffs */
671 mutex_init(&qinf->qi_quotaofflock);
672
673 /* Precalc some constants */
674 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
675 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
676
677 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
678
679 xfs_qm_init_timelimits(mp, qinf);
680
681 if (XFS_IS_UQUOTA_RUNNING(mp))
682 xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
683 if (XFS_IS_GQUOTA_RUNNING(mp))
684 xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
685 if (XFS_IS_PQUOTA_RUNNING(mp))
686 xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
687
688 qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
689 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
690 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
691 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
692
693 error = register_shrinker(&qinf->qi_shrinker);
694 if (error)
695 goto out_free_inos;
696
697 return 0;
698
699 out_free_inos:
700 mutex_destroy(&qinf->qi_quotaofflock);
701 mutex_destroy(&qinf->qi_tree_lock);
702 xfs_qm_destroy_quotainos(qinf);
703 out_free_lru:
704 list_lru_destroy(&qinf->qi_lru);
705 out_free_qinf:
706 kmem_free(qinf);
707 mp->m_quotainfo = NULL;
708 return error;
709 }
710
711 /*
712 * Gets called when unmounting a filesystem or when all quotas get
713 * turned off.
714 * This purges the quota inodes, destroys locks and frees itself.
715 */
716 void
717 xfs_qm_destroy_quotainfo(
718 xfs_mount_t *mp)
719 {
720 xfs_quotainfo_t *qi;
721
722 qi = mp->m_quotainfo;
723 ASSERT(qi != NULL);
724
725 unregister_shrinker(&qi->qi_shrinker);
726 list_lru_destroy(&qi->qi_lru);
727 xfs_qm_destroy_quotainos(qi);
728 mutex_destroy(&qi->qi_tree_lock);
729 mutex_destroy(&qi->qi_quotaofflock);
730 kmem_free(qi);
731 mp->m_quotainfo = NULL;
732 }
733
734 /*
735  * Create an inode and return with a reference already taken, but unlocked.
736  * This is how we create quota inodes.
737 */
738 STATIC int
739 xfs_qm_qino_alloc(
740 xfs_mount_t *mp,
741 xfs_inode_t **ip,
742 uint flags)
743 {
744 xfs_trans_t *tp;
745 int error;
746 bool need_alloc = true;
747
748 *ip = NULL;
749 /*
750  * With a superblock that doesn't have a separate pquotino, we
751 * share an inode between gquota and pquota. If the on-disk
752 * superblock has GQUOTA and the filesystem is now mounted
753 * with PQUOTA, just use sb_gquotino for sb_pquotino and
754 * vice-versa.
755 */
756 if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
757 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
758 xfs_ino_t ino = NULLFSINO;
759
760 if ((flags & XFS_QMOPT_PQUOTA) &&
761 (mp->m_sb.sb_gquotino != NULLFSINO)) {
762 ino = mp->m_sb.sb_gquotino;
763 if (mp->m_sb.sb_pquotino != NULLFSINO) {
764 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
765 mp);
766 return -EFSCORRUPTED;
767 }
768 } else if ((flags & XFS_QMOPT_GQUOTA) &&
769 (mp->m_sb.sb_pquotino != NULLFSINO)) {
770 ino = mp->m_sb.sb_pquotino;
771 if (mp->m_sb.sb_gquotino != NULLFSINO) {
772 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
773 mp);
774 return -EFSCORRUPTED;
775 }
776 }
777 if (ino != NULLFSINO) {
778 error = xfs_iget(mp, NULL, ino, 0, 0, ip);
779 if (error)
780 return error;
781 mp->m_sb.sb_gquotino = NULLFSINO;
782 mp->m_sb.sb_pquotino = NULLFSINO;
783 need_alloc = false;
784 }
785 }
786
787 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
788 XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
789 if (error)
790 return error;
791
792 if (need_alloc) {
793 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
794 if (error) {
795 xfs_trans_cancel(tp);
796 return error;
797 }
798 }
799
800 /*
801 * Make the changes in the superblock, and log those too.
802 * sbfields arg may contain fields other than *QUOTINO;
803 * VERSIONNUM for example.
804 */
805 spin_lock(&mp->m_sb_lock);
806 if (flags & XFS_QMOPT_SBVERSION) {
807 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
808
809 xfs_sb_version_addquota(&mp->m_sb);
810 mp->m_sb.sb_uquotino = NULLFSINO;
811 mp->m_sb.sb_gquotino = NULLFSINO;
812 mp->m_sb.sb_pquotino = NULLFSINO;
813
814 /* qflags will get updated fully _after_ quotacheck */
815 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
816 }
817 if (flags & XFS_QMOPT_UQUOTA)
818 mp->m_sb.sb_uquotino = (*ip)->i_ino;
819 else if (flags & XFS_QMOPT_GQUOTA)
820 mp->m_sb.sb_gquotino = (*ip)->i_ino;
821 else
822 mp->m_sb.sb_pquotino = (*ip)->i_ino;
823 spin_unlock(&mp->m_sb_lock);
824 xfs_log_sb(tp);
825
826 error = xfs_trans_commit(tp);
827 if (error) {
828 ASSERT(XFS_FORCED_SHUTDOWN(mp));
829 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
830 }
831 if (need_alloc)
832 xfs_finish_inode_setup(*ip);
833 return error;
834 }
835
836
837 STATIC void
838 xfs_qm_reset_dqcounts(
839 xfs_mount_t *mp,
840 xfs_buf_t *bp,
841 xfs_dqid_t id,
842 uint type)
843 {
844 struct xfs_dqblk *dqb;
845 int j;
846 xfs_failaddr_t fa;
847
848 trace_xfs_reset_dqcounts(bp, _RET_IP_);
849
850 /*
851 * Reset all counters and timers. They'll be
852 * started afresh by xfs_qm_quotacheck.
853 */
854 #ifdef DEBUG
855 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
856 sizeof(xfs_dqblk_t);
857 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
858 #endif
859 dqb = bp->b_addr;
860 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
861 struct xfs_disk_dquot *ddq;
862
863 ddq = (struct xfs_disk_dquot *)&dqb[j];
864
865 /*
866 * Do a sanity check, and if needed, repair the dqblk. Don't
867 * output any warnings because it's perfectly possible to
868 * find uninitialised dquot blks. See comment in
869 * xfs_dquot_verify.
870 */
871 fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type);
872 if (fa)
873 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
874
875 /*
876 * Reset type in case we are reusing group quota file for
877 * project quotas or vice versa
878 */
879 ddq->d_flags = type;
880 ddq->d_bcount = 0;
881 ddq->d_icount = 0;
882 ddq->d_rtbcount = 0;
883
884 /*
885 * dquot id 0 stores the default grace period and the maximum
886 * warning limit that were set by the administrator, so we
887 * should not reset them.
888 */
889 if (ddq->d_id != 0) {
890 ddq->d_btimer = 0;
891 ddq->d_itimer = 0;
892 ddq->d_rtbtimer = 0;
893 ddq->d_bwarns = 0;
894 ddq->d_iwarns = 0;
895 ddq->d_rtbwarns = 0;
896 }
897
898 if (xfs_sb_version_hascrc(&mp->m_sb)) {
899 xfs_update_cksum((char *)&dqb[j],
900 sizeof(struct xfs_dqblk),
901 XFS_DQUOT_CRC_OFF);
902 }
903 }
904 }
905
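/*
 * Read each dquot buffer in the given run of filesystem blocks, reset the
 * counters of every dquot inside it, and queue the buffer on @buffer_list
 * for delayed write.
 */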
906 STATIC int
907 xfs_qm_reset_dqcounts_all(
908 struct xfs_mount *mp,
909 xfs_dqid_t firstid,
910 xfs_fsblock_t bno,
911 xfs_filblks_t blkcnt,
912 uint flags,
913 struct list_head *buffer_list)
914 {
915 struct xfs_buf *bp;
916 int error;
917 int type;
918
919 ASSERT(blkcnt > 0);
920 type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
921 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
922 error = 0;
923
924 /*
925 * Blkcnt arg can be a very big number, and might even be
926 * larger than the log itself. So, we have to break it up into
927 * manageable-sized transactions.
928 * Note that we don't start a permanent transaction here; we might
929 * not be able to get a log reservation for the whole thing up front,
930 * and we don't really care to either, because we just discard
931 * everything if we were to crash in the middle of this loop.
932 */
933 while (blkcnt--) {
934 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
935 XFS_FSB_TO_DADDR(mp, bno),
936 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
937 &xfs_dquot_buf_ops);
938
939 /*
940  * CRC and validation errors will return an EFSCORRUPTED here. If
941 * this occurs, re-read without CRC validation so that we can
942 * repair the damage via xfs_qm_reset_dqcounts(). This process
943 * will leave a trace in the log indicating corruption has
944 * been detected.
945 */
946 if (error == -EFSCORRUPTED) {
947 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
948 XFS_FSB_TO_DADDR(mp, bno),
949 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
950 NULL);
951 }
952
953 if (error)
954 break;
955
956 /*
957 * A corrupt buffer might not have a verifier attached, so
958 * make sure we have the correct one attached before writeback
959 * occurs.
960 */
961 bp->b_ops = &xfs_dquot_buf_ops;
962 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
963 xfs_buf_delwri_queue(bp, buffer_list);
964 xfs_buf_relse(bp);
965
966 /* goto the next block. */
967 bno++;
968 firstid += mp->m_quotainfo->qi_dqperchunk;
969 }
970
971 return error;
972 }
973
974 /*
975 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
976 * counters for every chunk of dquots that we find.
977 */
978 STATIC int
979 xfs_qm_reset_dqcounts_buf(
980 struct xfs_mount *mp,
981 struct xfs_inode *qip,
982 uint flags,
983 struct list_head *buffer_list)
984 {
985 struct xfs_bmbt_irec *map;
986 int i, nmaps; /* number of map entries */
987 int error; /* return value */
988 xfs_fileoff_t lblkno;
989 xfs_filblks_t maxlblkcnt;
990 xfs_dqid_t firstid;
991 xfs_fsblock_t rablkno;
992 xfs_filblks_t rablkcnt;
993
994 error = 0;
995 /*
996 * This looks racy, but we can't keep an inode lock across a
997 * trans_reserve. But, this gets called during quotacheck, and that
998 * happens only at mount time which is single threaded.
999 */
1000 if (qip->i_d.di_nblocks == 0)
1001 return 0;
1002
1003 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
1004
1005 lblkno = 0;
1006 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1007 do {
1008 uint lock_mode;
1009
1010 nmaps = XFS_DQITER_MAP_SIZE;
1011 /*
1012 * We aren't changing the inode itself. Just changing
1013 * some of its data. No new blocks are added here, and
1014 * the inode is never added to the transaction.
1015 */
1016 lock_mode = xfs_ilock_data_map_shared(qip);
1017 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1018 map, &nmaps, 0);
1019 xfs_iunlock(qip, lock_mode);
1020 if (error)
1021 break;
1022
1023 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1024 for (i = 0; i < nmaps; i++) {
1025 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1026 ASSERT(map[i].br_blockcount);
1027
1028
1029 lblkno += map[i].br_blockcount;
1030
1031 if (map[i].br_startblock == HOLESTARTBLOCK)
1032 continue;
1033
1034 firstid = (xfs_dqid_t) map[i].br_startoff *
1035 mp->m_quotainfo->qi_dqperchunk;
1036 /*
1037 * Do a read-ahead on the next extent.
1038 */
1039 if ((i+1 < nmaps) &&
1040 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1041 rablkcnt = map[i+1].br_blockcount;
1042 rablkno = map[i+1].br_startblock;
1043 while (rablkcnt--) {
1044 xfs_buf_readahead(mp->m_ddev_targp,
1045 XFS_FSB_TO_DADDR(mp, rablkno),
1046 mp->m_quotainfo->qi_dqchunklen,
1047 &xfs_dquot_buf_ops);
1048 rablkno++;
1049 }
1050 }
1051 /*
1052 * Iterate thru all the blks in the extent and
1053 * reset the counters of all the dquots inside them.
1054 */
1055 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1056 map[i].br_startblock,
1057 map[i].br_blockcount,
1058 flags, buffer_list);
1059 if (error)
1060 goto out;
1061 }
1062 } while (nmaps > 0);
1063
1064 out:
1065 kmem_free(map);
1066 return error;
1067 }
1068
1069 /*
1070 * Called by dqusage_adjust in doing a quotacheck.
1071 *
1072  * Given the inode and a dquot id, this updates both the incore dquot and
1073  * the buffer copy. This is so that once the quotacheck is done, we can
1074 * just log all the buffers, as opposed to logging numerous updates to
1075 * individual dquots.
1076 */
1077 STATIC int
1078 xfs_qm_quotacheck_dqadjust(
1079 struct xfs_inode *ip,
1080 uint type,
1081 xfs_qcnt_t nblks,
1082 xfs_qcnt_t rtblks)
1083 {
1084 struct xfs_mount *mp = ip->i_mount;
1085 struct xfs_dquot *dqp;
1086 xfs_dqid_t id;
1087 int error;
1088
1089 id = xfs_qm_id_for_quotatype(ip, type);
1090 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1091 if (error) {
1092 /*
1093 * Shouldn't be able to turn off quotas here.
1094 */
1095 ASSERT(error != -ESRCH);
1096 ASSERT(error != -ENOENT);
1097 return error;
1098 }
1099
1100 trace_xfs_dqadjust(dqp);
1101
1102 /*
1103 * Adjust the inode count and the block count to reflect this inode's
1104 * resource usage.
1105 */
1106 be64_add_cpu(&dqp->q_core.d_icount, 1);
1107 dqp->q_res_icount++;
1108 if (nblks) {
1109 be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1110 dqp->q_res_bcount += nblks;
1111 }
1112 if (rtblks) {
1113 be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1114 dqp->q_res_rtbcount += rtblks;
1115 }
1116
1117 /*
1118 * Set default limits, adjust timers (since we changed usages)
1119 *
1120 * There are no timers for the default values set in the root dquot.
1121 */
1122 if (dqp->q_core.d_id) {
1123 xfs_qm_adjust_dqlimits(mp, dqp);
1124 xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1125 }
1126
1127 dqp->dq_flags |= XFS_DQ_DIRTY;
1128 xfs_qm_dqput(dqp);
1129 return 0;
1130 }
1131
1132 /*
1133  * Callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1134 * dquots and update them to account for resources taken by that inode.
1135 */
1136 /* ARGSUSED */
1137 STATIC int
1138 xfs_qm_dqusage_adjust(
1139 struct xfs_mount *mp,
1140 struct xfs_trans *tp,
1141 xfs_ino_t ino,
1142 void *data)
1143 {
1144 struct xfs_inode *ip;
1145 xfs_qcnt_t nblks;
1146 xfs_filblks_t rtblks = 0; /* total rt blks */
1147 int error;
1148
1149 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1150
1151 /*
1152 * rootino must have its resources accounted for, not so with the quota
1153 * inodes.
1154 */
1155 if (xfs_is_quota_inode(&mp->m_sb, ino))
1156 return 0;
1157
1158 /*
1159 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1160 * at mount time and therefore nobody will be racing chown/chproj.
1161 */
1162 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1163 if (error == -EINVAL || error == -ENOENT)
1164 return 0;
1165 if (error)
1166 return error;
1167
1168 ASSERT(ip->i_delayed_blks == 0);
1169
1170 if (XFS_IS_REALTIME_INODE(ip)) {
1171 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1172
1173 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1174 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1175 if (error)
1176 goto error0;
1177 }
1178
1179 xfs_bmap_count_leaves(ifp, &rtblks);
1180 }
1181
1182 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1183
1184 /*
1185 * Add the (disk blocks and inode) resources occupied by this
1186 * inode to its dquots. We do this adjustment in the incore dquot,
1187 * and also copy the changes to its buffer.
1188 * We don't care about putting these changes in a transaction
1189 * envelope because if we crash in the middle of a 'quotacheck'
1190 * we have to start from the beginning anyway.
1191 * Once we're done, we'll log all the dquot bufs.
1192 *
1193 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1194 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1195 */
1196 if (XFS_IS_UQUOTA_ON(mp)) {
1197 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks,
1198 rtblks);
1199 if (error)
1200 goto error0;
1201 }
1202
1203 if (XFS_IS_GQUOTA_ON(mp)) {
1204 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks,
1205 rtblks);
1206 if (error)
1207 goto error0;
1208 }
1209
1210 if (XFS_IS_PQUOTA_ON(mp)) {
1211 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks,
1212 rtblks);
1213 if (error)
1214 goto error0;
1215 }
1216
1217 error0:
1218 xfs_irele(ip);
1219 return error;
1220 }
1221
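/*
 * Per-dquot callback used by quotacheck (via xfs_qm_dquot_walk) to write a
 * dirty dquot back into its buffer and queue that buffer on the caller's
 * delwri list.
 */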
1222 STATIC int
1223 xfs_qm_flush_one(
1224 struct xfs_dquot *dqp,
1225 void *data)
1226 {
1227 struct xfs_mount *mp = dqp->q_mount;
1228 struct list_head *buffer_list = data;
1229 struct xfs_buf *bp = NULL;
1230 int error = 0;
1231
1232 xfs_dqlock(dqp);
1233 if (dqp->dq_flags & XFS_DQ_FREEING)
1234 goto out_unlock;
1235 if (!XFS_DQ_IS_DIRTY(dqp))
1236 goto out_unlock;
1237
1238 /*
1239 * The only way the dquot is already flush locked by the time quotacheck
1240 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1241 * it for the final time. Quotacheck collects all dquot bufs in the
1242 * local delwri queue before dquots are dirtied, so reclaim can't have
1243 * possibly queued it for I/O. The only way out is to push the buffer to
1244 * cycle the flush lock.
1245 */
1246 if (!xfs_dqflock_nowait(dqp)) {
1247 /* buf is pinned in-core by delwri list */
1248 bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1249 mp->m_quotainfo->qi_dqchunklen, 0);
1250 if (!bp) {
1251 error = -EINVAL;
1252 goto out_unlock;
1253 }
1254 xfs_buf_unlock(bp);
1255
1256 xfs_buf_delwri_pushbuf(bp, buffer_list);
1257 xfs_buf_rele(bp);
1258
1259 error = -EAGAIN;
1260 goto out_unlock;
1261 }
1262
1263 error = xfs_qm_dqflush(dqp, &bp);
1264 if (error)
1265 goto out_unlock;
1266
1267 xfs_buf_delwri_queue(bp, buffer_list);
1268 xfs_buf_relse(bp);
1269 out_unlock:
1270 xfs_dqunlock(dqp);
1271 return error;
1272 }
1273
1274 /*
1275 * Walk thru all the filesystem inodes and construct a consistent view
1276 * of the disk quota world. If the quotacheck fails, disable quotas.
1277 */
1278 STATIC int
1279 xfs_qm_quotacheck(
1280 xfs_mount_t *mp)
1281 {
1282 int error, error2;
1283 uint flags;
1284 LIST_HEAD (buffer_list);
1285 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1286 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1287 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1288
1289 flags = 0;
1290
1291 ASSERT(uip || gip || pip);
1292 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1293
1294 xfs_notice(mp, "Quotacheck needed: Please wait.");
1295
1296 /*
1297 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1298 * their counters to zero. We need a clean slate.
1299 * We don't log our changes till later.
1300 */
1301 if (uip) {
1302 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA,
1303 &buffer_list);
1304 if (error)
1305 goto error_return;
1306 flags |= XFS_UQUOTA_CHKD;
1307 }
1308
1309 if (gip) {
1310 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA,
1311 &buffer_list);
1312 if (error)
1313 goto error_return;
1314 flags |= XFS_GQUOTA_CHKD;
1315 }
1316
1317 if (pip) {
1318 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA,
1319 &buffer_list);
1320 if (error)
1321 goto error_return;
1322 flags |= XFS_PQUOTA_CHKD;
1323 }
1324
1325 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1326 NULL);
1327 if (error)
1328 goto error_return;
1329
1330 /*
1331 * We've made all the changes that we need to make incore. Flush them
1332 * down to disk buffers if everything was updated successfully.
1333 */
1334 if (XFS_IS_UQUOTA_ON(mp)) {
1335 error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1336 &buffer_list);
1337 }
1338 if (XFS_IS_GQUOTA_ON(mp)) {
1339 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1340 &buffer_list);
1341 if (!error)
1342 error = error2;
1343 }
1344 if (XFS_IS_PQUOTA_ON(mp)) {
1345 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1346 &buffer_list);
1347 if (!error)
1348 error = error2;
1349 }
1350
1351 error2 = xfs_buf_delwri_submit(&buffer_list);
1352 if (!error)
1353 error = error2;
1354
1355 /*
1356 * We can get this error if we couldn't do a dquot allocation inside
1357  * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1358 * dirty dquots that might be cached, we just want to get rid of them
1359 * and turn quotaoff. The dquots won't be attached to any of the inodes
1360 * at this point (because we intentionally didn't in dqget_noattach).
1361 */
1362 if (error) {
1363 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1364 goto error_return;
1365 }
1366
1367 /*
1368  * If one type of quota is off, then it will lose its
1369 * quotachecked status, since we won't be doing accounting for
1370 * that type anymore.
1371 */
1372 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1373 mp->m_qflags |= flags;
1374
1375 error_return:
1376 xfs_buf_delwri_cancel(&buffer_list);
1377
1378 if (error) {
1379 xfs_warn(mp,
1380 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1381 error);
1382 /*
1383 * We must turn off quotas.
1384 */
1385 ASSERT(mp->m_quotainfo != NULL);
1386 xfs_qm_destroy_quotainfo(mp);
1387 if (xfs_mount_reset_sbqflags(mp)) {
1388 xfs_warn(mp,
1389 "Quotacheck: Failed to reset quota flags.");
1390 }
1391 } else
1392 xfs_notice(mp, "Quotacheck: Done.");
1393 return error;
1394 }
1395
1396 /*
1397 * This is called from xfs_mountfs to start quotas and initialize all
1398 * necessary data structures like quotainfo. This is also responsible for
1399 * running a quotacheck as necessary. We are guaranteed that the superblock
1400 * is consistently read in at this point.
1401 *
1402 * If we fail here, the mount will continue with quota turned off. We don't
1403  * need to indicate success or failure at all.
1404 */
1405 void
1406 xfs_qm_mount_quotas(
1407 struct xfs_mount *mp)
1408 {
1409 int error = 0;
1410 uint sbf;
1411
1412 /*
1413  * Quotas on realtime volumes are not supported, so disable
1414  * quotas immediately if the filesystem has realtime extents.
1415 */
1416 if (mp->m_sb.sb_rextents) {
1417 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1418 mp->m_qflags = 0;
1419 goto write_changes;
1420 }
1421
1422 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1423
1424 /*
1425 * Allocate the quotainfo structure inside the mount struct, and
1426 * create quotainode(s), and change/rev superblock if necessary.
1427 */
1428 error = xfs_qm_init_quotainfo(mp);
1429 if (error) {
1430 /*
1431 * We must turn off quotas.
1432 */
1433 ASSERT(mp->m_quotainfo == NULL);
1434 mp->m_qflags = 0;
1435 goto write_changes;
1436 }
1437 /*
1438 * If any of the quotas are not consistent, do a quotacheck.
1439 */
1440 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1441 error = xfs_qm_quotacheck(mp);
1442 if (error) {
1443 /* Quotacheck failed and disabled quotas. */
1444 return;
1445 }
1446 }
1447 /*
1448  * If one type of quota is off, then it will lose its
1449 * quotachecked status, since we won't be doing accounting for
1450 * that type anymore.
1451 */
1452 if (!XFS_IS_UQUOTA_ON(mp))
1453 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1454 if (!XFS_IS_GQUOTA_ON(mp))
1455 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1456 if (!XFS_IS_PQUOTA_ON(mp))
1457 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1458
1459 write_changes:
1460 /*
1461 * We actually don't have to acquire the m_sb_lock at all.
1462 * This can only be called from mount, and that's single threaded. XXX
1463 */
1464 spin_lock(&mp->m_sb_lock);
1465 sbf = mp->m_sb.sb_qflags;
1466 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1467 spin_unlock(&mp->m_sb_lock);
1468
1469 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1470 if (xfs_sync_sb(mp, false)) {
1471 /*
1472 * We could only have been turning quotas off.
1473 * We aren't in very good shape actually because
1474 * the incore structures are convinced that quotas are
1475 * off, but the on disk superblock doesn't know that !
1476 */
1477 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1478 xfs_alert(mp, "%s: Superblock update failed!",
1479 __func__);
1480 }
1481 }
1482
1483 if (error) {
1484 xfs_warn(mp, "Failed to initialize disk quotas.");
1485 return;
1486 }
1487 }
1488
1489 /*
1490 * This is called after the superblock has been read in and we're ready to
1491 * iget the quota inodes.
1492 */
1493 STATIC int
1494 xfs_qm_init_quotainos(
1495 xfs_mount_t *mp)
1496 {
1497 struct xfs_inode *uip = NULL;
1498 struct xfs_inode *gip = NULL;
1499 struct xfs_inode *pip = NULL;
1500 int error;
1501 uint flags = 0;
1502
1503 ASSERT(mp->m_quotainfo);
1504
1505 /*
1506 * Get the uquota and gquota inodes
1507 */
1508 if (xfs_sb_version_hasquota(&mp->m_sb)) {
1509 if (XFS_IS_UQUOTA_ON(mp) &&
1510 mp->m_sb.sb_uquotino != NULLFSINO) {
1511 ASSERT(mp->m_sb.sb_uquotino > 0);
1512 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1513 0, 0, &uip);
1514 if (error)
1515 return error;
1516 }
1517 if (XFS_IS_GQUOTA_ON(mp) &&
1518 mp->m_sb.sb_gquotino != NULLFSINO) {
1519 ASSERT(mp->m_sb.sb_gquotino > 0);
1520 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1521 0, 0, &gip);
1522 if (error)
1523 goto error_rele;
1524 }
1525 if (XFS_IS_PQUOTA_ON(mp) &&
1526 mp->m_sb.sb_pquotino != NULLFSINO) {
1527 ASSERT(mp->m_sb.sb_pquotino > 0);
1528 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1529 0, 0, &pip);
1530 if (error)
1531 goto error_rele;
1532 }
1533 } else {
1534 flags |= XFS_QMOPT_SBVERSION;
1535 }
1536
1537 /*
1538 * Create the three inodes, if they don't exist already. The changes
1539 * made above will get added to a transaction and logged in one of
1540 * the qino_alloc calls below. If the device is readonly,
1541 * temporarily switch to read-write to do this.
1542 */
1543 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1544 error = xfs_qm_qino_alloc(mp, &uip,
1545 flags | XFS_QMOPT_UQUOTA);
1546 if (error)
1547 goto error_rele;
1548
1549 flags &= ~XFS_QMOPT_SBVERSION;
1550 }
1551 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1552 error = xfs_qm_qino_alloc(mp, &gip,
1553 flags | XFS_QMOPT_GQUOTA);
1554 if (error)
1555 goto error_rele;
1556
1557 flags &= ~XFS_QMOPT_SBVERSION;
1558 }
1559 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1560 error = xfs_qm_qino_alloc(mp, &pip,
1561 flags | XFS_QMOPT_PQUOTA);
1562 if (error)
1563 goto error_rele;
1564 }
1565
1566 mp->m_quotainfo->qi_uquotaip = uip;
1567 mp->m_quotainfo->qi_gquotaip = gip;
1568 mp->m_quotainfo->qi_pquotaip = pip;
1569
1570 return 0;
1571
1572 error_rele:
1573 if (uip)
1574 xfs_irele(uip);
1575 if (gip)
1576 xfs_irele(gip);
1577 if (pip)
1578 xfs_irele(pip);
1579 return error;
1580 }
1581
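/*
 * Drop the references to the quota inodes that were taken by
 * xfs_qm_init_quotainos().
 */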
1582 STATIC void
1583 xfs_qm_destroy_quotainos(
1584 xfs_quotainfo_t *qi)
1585 {
1586 if (qi->qi_uquotaip) {
1587 xfs_irele(qi->qi_uquotaip);
1588 qi->qi_uquotaip = NULL; /* paranoia */
1589 }
1590 if (qi->qi_gquotaip) {
1591 xfs_irele(qi->qi_gquotaip);
1592 qi->qi_gquotaip = NULL;
1593 }
1594 if (qi->qi_pquotaip) {
1595 xfs_irele(qi->qi_pquotaip);
1596 qi->qi_pquotaip = NULL;
1597 }
1598 }
1599
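/*
 * Remove a dquot from the radix tree and destroy it.  Called by the shrinker
 * on dquots that have already been isolated from the LRU.
 */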
1600 STATIC void
1601 xfs_qm_dqfree_one(
1602 struct xfs_dquot *dqp)
1603 {
1604 struct xfs_mount *mp = dqp->q_mount;
1605 struct xfs_quotainfo *qi = mp->m_quotainfo;
1606
1607 mutex_lock(&qi->qi_tree_lock);
1608 radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1609 be32_to_cpu(dqp->q_core.d_id));
1610
1611 qi->qi_dquots--;
1612 mutex_unlock(&qi->qi_tree_lock);
1613
1614 xfs_qm_dqdestroy(dqp);
1615 }
1616
1617 /* --------------- utility functions for vnodeops ---------------- */
1618
1619
1620 /*
1621 * Given an inode, a uid, gid and prid make sure that we have
1622 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1623 * quotas by creating this file.
1624 * This also attaches dquot(s) to the given inode after locking it,
1625 * and returns the dquots corresponding to the uid and/or gid.
1626 *
1627 * in : inode (unlocked)
1628 * out : udquot, gdquot with references taken and unlocked
1629 */
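/*
 * Illustrative sketch of a typical caller during inode creation.  This is
 * not taken from this file; the names dp, ip, tp, fsuid, fsgid and prid are
 * placeholders, and error handling and transaction setup are elided:
 *
 *	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
 *
 *	error = xfs_qm_vop_dqalloc(dp, fsuid, fsgid, prid,
 *			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *			&udqp, &gdqp, &pdqp);
 *	// ...allocate the inode in a transaction tp, then...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	// ...commit, then drop the references we were handed back:
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */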
1630 int
1631 xfs_qm_vop_dqalloc(
1632 struct xfs_inode *ip,
1633 kuid_t uid,
1634 kgid_t gid,
1635 prid_t prid,
1636 uint flags,
1637 struct xfs_dquot **O_udqpp,
1638 struct xfs_dquot **O_gdqpp,
1639 struct xfs_dquot **O_pdqpp)
1640 {
1641 struct xfs_mount *mp = ip->i_mount;
1642 struct inode *inode = VFS_I(ip);
1643 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1644 struct xfs_dquot *uq = NULL;
1645 struct xfs_dquot *gq = NULL;
1646 struct xfs_dquot *pq = NULL;
1647 int error;
1648 uint lockflags;
1649
1650 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1651 return 0;
1652
1653 lockflags = XFS_ILOCK_EXCL;
1654 xfs_ilock(ip, lockflags);
1655
1656 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1657 gid = inode->i_gid;
1658
1659 /*
1660 * Attach the dquot(s) to this inode, doing a dquot allocation
1661 * if necessary. The dquot(s) will not be locked.
1662 */
1663 if (XFS_NOT_DQATTACHED(mp, ip)) {
1664 error = xfs_qm_dqattach_locked(ip, true);
1665 if (error) {
1666 xfs_iunlock(ip, lockflags);
1667 return error;
1668 }
1669 }
1670
1671 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1672 if (!uid_eq(inode->i_uid, uid)) {
1673 /*
1674 * What we need is the dquot that has this uid, and
1675 * if we send the inode to dqget, the uid of the inode
1676 * takes priority over what's sent in the uid argument.
1677 * We must unlock inode here before calling dqget if
1678 * we're not sending the inode, because otherwise
1679 * we'll deadlock by doing trans_reserve while
1680 * holding ilock.
1681 */
1682 xfs_iunlock(ip, lockflags);
1683 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1684 XFS_DQ_USER, true, &uq);
1685 if (error) {
1686 ASSERT(error != -ENOENT);
1687 return error;
1688 }
1689 /*
1690 * Get the ilock in the right order.
1691 */
1692 xfs_dqunlock(uq);
1693 lockflags = XFS_ILOCK_SHARED;
1694 xfs_ilock(ip, lockflags);
1695 } else {
1696 /*
1697 * Take an extra reference, because we'll return
1698 * this to caller
1699 */
1700 ASSERT(ip->i_udquot);
1701 uq = xfs_qm_dqhold(ip->i_udquot);
1702 }
1703 }
1704 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1705 if (!gid_eq(inode->i_gid, gid)) {
1706 xfs_iunlock(ip, lockflags);
1707 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1708 XFS_DQ_GROUP, true, &gq);
1709 if (error) {
1710 ASSERT(error != -ENOENT);
1711 goto error_rele;
1712 }
1713 xfs_dqunlock(gq);
1714 lockflags = XFS_ILOCK_SHARED;
1715 xfs_ilock(ip, lockflags);
1716 } else {
1717 ASSERT(ip->i_gdquot);
1718 gq = xfs_qm_dqhold(ip->i_gdquot);
1719 }
1720 }
1721 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1722 if (ip->i_d.di_projid != prid) {
1723 xfs_iunlock(ip, lockflags);
1724 error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
1725 true, &pq);
1726 if (error) {
1727 ASSERT(error != -ENOENT);
1728 goto error_rele;
1729 }
1730 xfs_dqunlock(pq);
1731 lockflags = XFS_ILOCK_SHARED;
1732 xfs_ilock(ip, lockflags);
1733 } else {
1734 ASSERT(ip->i_pdquot);
1735 pq = xfs_qm_dqhold(ip->i_pdquot);
1736 }
1737 }
1738 if (uq)
1739 trace_xfs_dquot_dqalloc(ip);
1740
1741 xfs_iunlock(ip, lockflags);
1742 if (O_udqpp)
1743 *O_udqpp = uq;
1744 else
1745 xfs_qm_dqrele(uq);
1746 if (O_gdqpp)
1747 *O_gdqpp = gq;
1748 else
1749 xfs_qm_dqrele(gq);
1750 if (O_pdqpp)
1751 *O_pdqpp = pq;
1752 else
1753 xfs_qm_dqrele(pq);
1754 return 0;
1755
1756 error_rele:
1757 xfs_qm_dqrele(gq);
1758 xfs_qm_dqrele(uq);
1759 return error;
1760 }
1761
1762 /*
1763 * Actually transfer ownership, and do dquot modifications.
1764 * These were already reserved.
1765 */
1766 struct xfs_dquot *
1767 xfs_qm_vop_chown(
1768 struct xfs_trans *tp,
1769 struct xfs_inode *ip,
1770 struct xfs_dquot **IO_olddq,
1771 struct xfs_dquot *newdq)
1772 {
1773 struct xfs_dquot *prevdq;
1774 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1775 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1776
1777
1778 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1779 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1780
1781 /* old dquot */
1782 prevdq = *IO_olddq;
1783 ASSERT(prevdq);
1784 ASSERT(prevdq != newdq);
1785
1786 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1787 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1788
1789 /* the sparkling new dquot */
1790 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1791 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1792
1793 /*
1794 * Take an extra reference, because the inode is going to keep
1795 * this dquot pointer even after the trans_commit.
1796 */
1797 *IO_olddq = xfs_qm_dqhold(newdq);
1798
1799 return prevdq;
1800 }
1801
1802 /*
1803 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1804 */
1805 int
1806 xfs_qm_vop_chown_reserve(
1807 struct xfs_trans *tp,
1808 struct xfs_inode *ip,
1809 struct xfs_dquot *udqp,
1810 struct xfs_dquot *gdqp,
1811 struct xfs_dquot *pdqp,
1812 uint flags)
1813 {
1814 struct xfs_mount *mp = ip->i_mount;
1815 uint64_t delblks;
1816 unsigned int blkflags, prjflags = 0;
1817 struct xfs_dquot *udq_unres = NULL;
1818 struct xfs_dquot *gdq_unres = NULL;
1819 struct xfs_dquot *pdq_unres = NULL;
1820 struct xfs_dquot *udq_delblks = NULL;
1821 struct xfs_dquot *gdq_delblks = NULL;
1822 struct xfs_dquot *pdq_delblks = NULL;
1823 int error;
1824
1825
1826 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1827 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1828
1829 delblks = ip->i_delayed_blks;
1830 blkflags = XFS_IS_REALTIME_INODE(ip) ?
1831 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1832
1833 if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1834 i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
1835 udq_delblks = udqp;
1836 /*
1837 * If there are delayed allocation blocks, then we have to
1838 * unreserve those from the old dquot, and add them to the
1839 * new dquot.
1840 */
1841 if (delblks) {
1842 ASSERT(ip->i_udquot);
1843 udq_unres = ip->i_udquot;
1844 }
1845 }
1846 if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1847 i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
1848 gdq_delblks = gdqp;
1849 if (delblks) {
1850 ASSERT(ip->i_gdquot);
1851 gdq_unres = ip->i_gdquot;
1852 }
1853 }
1854
1855 if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1856 ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
1857 prjflags = XFS_QMOPT_ENOSPC;
1858 pdq_delblks = pdqp;
1859 if (delblks) {
1860 ASSERT(ip->i_pdquot);
1861 pdq_unres = ip->i_pdquot;
1862 }
1863 }
1864
1865 error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1866 udq_delblks, gdq_delblks, pdq_delblks,
1867 ip->i_d.di_nblocks, 1,
1868 flags | blkflags | prjflags);
1869 if (error)
1870 return error;
1871
1872 /*
1873 * Do the delayed blks reservations/unreservations now. Since, these
1874 * are done without the help of a transaction, if a reservation fails
1875 * its previous reservations won't be automatically undone by trans
1876 * code. So, we have to do it manually here.
1877 */
1878 if (delblks) {
1879 /*
1880 * Do the reservations first. Unreservation can't fail.
1881 */
1882 ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1883 ASSERT(udq_unres || gdq_unres || pdq_unres);
1884 error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1885 udq_delblks, gdq_delblks, pdq_delblks,
1886 (xfs_qcnt_t)delblks, 0,
1887 flags | blkflags | prjflags);
1888 if (error)
1889 return error;
1890 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1891 udq_unres, gdq_unres, pdq_unres,
1892 -((xfs_qcnt_t)delblks), 0, blkflags);
1893 }
1894
1895 return 0;
1896 }
1897
1898 int
1899 xfs_qm_vop_rename_dqattach(
1900 struct xfs_inode **i_tab)
1901 {
1902 struct xfs_mount *mp = i_tab[0]->i_mount;
1903 int i;
1904
1905 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1906 return 0;
1907
1908 for (i = 0; (i < 4 && i_tab[i]); i++) {
1909 struct xfs_inode *ip = i_tab[i];
1910 int error;
1911
1912 /*
1913 * Watch out for duplicate entries in the table.
1914 */
1915 if (i == 0 || ip != i_tab[i-1]) {
1916 if (XFS_NOT_DQATTACHED(mp, ip)) {
1917 error = xfs_qm_dqattach(ip);
1918 if (error)
1919 return error;
1920 }
1921 }
1922 }
1923 return 0;
1924 }
1925
1926 void
1927 xfs_qm_vop_create_dqattach(
1928 struct xfs_trans *tp,
1929 struct xfs_inode *ip,
1930 struct xfs_dquot *udqp,
1931 struct xfs_dquot *gdqp,
1932 struct xfs_dquot *pdqp)
1933 {
1934 struct xfs_mount *mp = tp->t_mountp;
1935
1936 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1937 return;
1938
1939 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1940 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1941
1942 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1943 ASSERT(ip->i_udquot == NULL);
1944 ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
1945
1946 ip->i_udquot = xfs_qm_dqhold(udqp);
1947 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1948 }
1949 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1950 ASSERT(ip->i_gdquot == NULL);
1951 ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
1952
1953 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1954 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1955 }
1956 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1957 ASSERT(ip->i_pdquot == NULL);
1958 ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
1959
1960 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1961 xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1962 }
1963 }
1964
1965