Lines Matching +full:ip +full:- +full:blocks

2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
13 * per-node file) and then are periodically synced to the quota file. This
35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
79 #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
81 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
82 /* -> sd_bitmap_lock */
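
As a minimal illustration of that hierarchy, here is a hypothetical userspace model, with pthread mutexes standing in for the kernel's spinlocks; none of these symbols are GFS2 API, only the ordering is taken from the comment above:

    #include <pthread.h>

    /* Stand-ins for the five locks named in the lock-order comment. */
    static pthread_mutex_t qd_lock        = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t bucket_lock    = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lockref_lock   = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lru_lock       = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sd_bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

    /* A thread needing several locks takes them in the documented order
     * and releases in reverse; violating the order risks ABBA deadlock. */
    static void touch_qd(void)
    {
        pthread_mutex_lock(&qd_lock);
        pthread_mutex_lock(&bucket_lock);
        pthread_mutex_lock(&lockref_lock);
        pthread_mutex_lock(&lru_lock);
        pthread_mutex_lock(&sd_bitmap_lock);
        /* ... manipulate the quota data ... */
        pthread_mutex_unlock(&sd_bitmap_lock);
        pthread_mutex_unlock(&lru_lock);
        pthread_mutex_unlock(&lockref_lock);
        pthread_mutex_unlock(&bucket_lock);
        pthread_mutex_unlock(&qd_lock);
    }

    int main(void) { touch_qd(); return 0; }
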
121 qd = list_entry(list->next, struct gfs2_quota_data, qd_lru); in gfs2_qd_dispose()
122 sdp = qd->qd_gl->gl_name.ln_sbd; in gfs2_qd_dispose()
124 list_del(&qd->qd_lru); in gfs2_qd_dispose()
126 /* Free from the filesystem-specific list */ in gfs2_qd_dispose()
128 list_del(&qd->qd_list); in gfs2_qd_dispose()
131 spin_lock_bucket(qd->qd_hash); in gfs2_qd_dispose()
132 hlist_bl_del_rcu(&qd->qd_hlist); in gfs2_qd_dispose()
133 spin_unlock_bucket(qd->qd_hash); in gfs2_qd_dispose()
135 gfs2_assert_warn(sdp, !qd->qd_change); in gfs2_qd_dispose()
136 gfs2_assert_warn(sdp, !qd->qd_slot_count); in gfs2_qd_dispose()
137 gfs2_assert_warn(sdp, !qd->qd_bh_count); in gfs2_qd_dispose()
139 gfs2_glock_put(qd->qd_gl); in gfs2_qd_dispose()
140 atomic_dec(&sdp->sd_quota_count); in gfs2_qd_dispose()
143 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); in gfs2_qd_dispose()
154 if (!spin_trylock(&qd->qd_lockref.lock)) in gfs2_qd_isolate()
157 if (qd->qd_lockref.count == 0) { in gfs2_qd_isolate()
158 lockref_mark_dead(&qd->qd_lockref); in gfs2_qd_isolate()
159 list_lru_isolate_move(lru, &qd->qd_lru, dispose); in gfs2_qd_isolate()
162 spin_unlock(&qd->qd_lockref.lock); in gfs2_qd_isolate()
172 if (!(sc->gfp_mask & __GFP_FS)) in gfs2_qd_shrink_scan()
199 struct kqid qid = qd->qd_id; in qd2index()
223 qd->qd_sbd = sdp; in qd_alloc()
224 qd->qd_lockref.count = 1; in qd_alloc()
225 spin_lock_init(&qd->qd_lockref.lock); in qd_alloc()
226 qd->qd_id = qid; in qd_alloc()
227 qd->qd_slot = -1; in qd_alloc()
228 INIT_LIST_HEAD(&qd->qd_lru); in qd_alloc()
229 qd->qd_hash = hash; in qd_alloc()
232 &gfs2_quota_glops, CREATE, &qd->qd_gl); in qd_alloc()
251 if (!qid_eq(qd->qd_id, qid)) in gfs2_qd_search_bucket()
253 if (qd->qd_sbd != sdp) in gfs2_qd_search_bucket()
255 if (lockref_get_not_dead(&qd->qd_lockref)) { in gfs2_qd_search_bucket()
256 list_lru_del(&gfs2_qd_lru, &qd->qd_lru); in gfs2_qd_search_bucket()
280 return -ENOMEM; in qd_get()
287 list_add(&new_qd->qd_list, &sdp->sd_quota_list); in qd_get()
288 hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); in qd_get()
289 atomic_inc(&sdp->sd_quota_count); in qd_get()
295 gfs2_glock_put(new_qd->qd_gl); in qd_get()
305 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in qd_hold()
306 gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); in qd_hold()
307 lockref_get(&qd->qd_lockref); in qd_hold()
312 if (lockref_put_or_lock(&qd->qd_lockref)) in qd_put()
315 qd->qd_lockref.count = 0; in qd_put()
316 list_lru_add(&gfs2_qd_lru, &qd->qd_lru); in qd_put()
317 spin_unlock(&qd->qd_lockref.lock); in qd_put()
323 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_get()
327 spin_lock(&sdp->sd_bitmap_lock); in slot_get()
328 if (qd->qd_slot_count != 0) in slot_get()
331 error = -ENOSPC; in slot_get()
332 bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots); in slot_get()
333 if (bit < sdp->sd_quota_slots) { in slot_get()
334 set_bit(bit, sdp->sd_quota_bitmap); in slot_get()
335 qd->qd_slot = bit; in slot_get()
338 qd->qd_slot_count++; in slot_get()
340 spin_unlock(&sdp->sd_bitmap_lock); in slot_get()
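
slot_get() above hands out quota slots by scanning a bitmap for the first clear bit. A self-contained userspace sketch of the same idea, with a plain loop standing in for the kernel's find_first_zero_bit()/set_bit() and all names (QUOTA_SLOTS, slot_alloc) invented for illustration:

    #include <limits.h>
    #include <stdio.h>

    #define QUOTA_SLOTS 64   /* hypothetical sdp->sd_quota_slots */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long quota_bitmap[QUOTA_SLOTS / BITS_PER_LONG + 1];

    /* Find the first clear bit, set it, and return it; -1 models -ENOSPC. */
    static int slot_alloc(void)
    {
        for (int bit = 0; bit < QUOTA_SLOTS; bit++) {
            unsigned long *word = &quota_bitmap[bit / BITS_PER_LONG];
            unsigned long mask = 1UL << (bit % BITS_PER_LONG);
            if (!(*word & mask)) {
                *word |= mask;   /* claim the slot */
                return bit;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("first slot: %d\n", slot_alloc());   /* 0 */
        printf("second slot: %d\n", slot_alloc());  /* 1 */
        return 0;
    }
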
347 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_hold()
349 spin_lock(&sdp->sd_bitmap_lock); in slot_hold()
350 gfs2_assert(sdp, qd->qd_slot_count); in slot_hold()
351 qd->qd_slot_count++; in slot_hold()
352 spin_unlock(&sdp->sd_bitmap_lock); in slot_hold()
357 struct gfs2_sbd *sdp = qd->qd_sbd; in slot_put()
359 spin_lock(&sdp->sd_bitmap_lock); in slot_put()
360 gfs2_assert(sdp, qd->qd_slot_count); in slot_put()
361 if (!--qd->qd_slot_count) { in slot_put()
362 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap)); in slot_put()
363 qd->qd_slot = -1; in slot_put()
365 spin_unlock(&sdp->sd_bitmap_lock); in slot_put()
370 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in bh_get()
371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in bh_get() local
377 mutex_lock(&sdp->sd_quota_mutex); in bh_get()
379 if (qd->qd_bh_count++) { in bh_get()
380 mutex_unlock(&sdp->sd_quota_mutex); in bh_get()
384 block = qd->qd_slot / sdp->sd_qc_per_block; in bh_get()
385 offset = qd->qd_slot % sdp->sd_qc_per_block; in bh_get()
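
For example, with a hypothetical sd_qc_per_block of 254 (roughly (4096 - sizeof(struct gfs2_meta_header)) / sizeof(struct gfs2_quota_change) on a 4 KiB-block filesystem), slot 600 would live in quota-change block 600 / 254 = 2 at offset 600 % 254 = 92.
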
387 bh_map.b_size = BIT(ip->i_inode.i_blkbits); in bh_get()
388 error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0); in bh_get()
391 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh); in bh_get()
394 error = -EIO; in bh_get()
398 qd->qd_bh = bh; in bh_get()
399 qd->qd_bh_qc = (struct gfs2_quota_change *) in bh_get()
400 (bh->b_data + sizeof(struct gfs2_meta_header) + in bh_get()
403 mutex_unlock(&sdp->sd_quota_mutex); in bh_get()
410 qd->qd_bh_count--; in bh_get()
411 mutex_unlock(&sdp->sd_quota_mutex); in bh_get()
417 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in bh_put()
419 mutex_lock(&sdp->sd_quota_mutex); in bh_put()
420 gfs2_assert(sdp, qd->qd_bh_count); in bh_put()
421 if (!--qd->qd_bh_count) { in bh_put()
422 brelse(qd->qd_bh); in bh_put()
423 qd->qd_bh = NULL; in bh_put()
424 qd->qd_bh_qc = NULL; in bh_put()
426 mutex_unlock(&sdp->sd_quota_mutex); in bh_put()
432 if (test_bit(QDF_LOCKED, &qd->qd_flags) || in qd_check_sync()
433 !test_bit(QDF_CHANGE, &qd->qd_flags) || in qd_check_sync()
434 (sync_gen && (qd->qd_sync_gen >= *sync_gen))) in qd_check_sync()
437 if (!lockref_get_not_dead(&qd->qd_lockref)) in qd_check_sync()
440 list_move_tail(&qd->qd_list, &sdp->sd_quota_list); in qd_check_sync()
441 set_bit(QDF_LOCKED, &qd->qd_flags); in qd_check_sync()
442 qd->qd_change_sync = qd->qd_change; in qd_check_sync()
455 if (sb_rdonly(sdp->sd_vfs)) in qd_fish()
460 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { in qd_fish()
461 found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen); in qd_fish()
472 gfs2_assert_warn(sdp, qd->qd_change_sync); in qd_fish()
475 clear_bit(QDF_LOCKED, &qd->qd_flags); in qd_fish()
489 gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd, in qd_unlock()
490 test_bit(QDF_LOCKED, &qd->qd_flags)); in qd_unlock()
491 clear_bit(QDF_LOCKED, &qd->qd_flags); in qd_unlock()
531 * gfs2_qa_alloc - make sure we have a quota allocation data structure,
533 * @ip: the inode for this reservation
535 int gfs2_qa_alloc(struct gfs2_inode *ip) in gfs2_qa_alloc() argument
538 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_qa_alloc()
540 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_qa_alloc()
543 down_write(&ip->i_rw_mutex); in gfs2_qa_alloc()
544 if (ip->i_qadata == NULL) { in gfs2_qa_alloc()
545 ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS); in gfs2_qa_alloc()
546 if (!ip->i_qadata) in gfs2_qa_alloc()
547 error = -ENOMEM; in gfs2_qa_alloc()
549 up_write(&ip->i_rw_mutex); in gfs2_qa_alloc()
553 void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount) in gfs2_qa_delete() argument
555 down_write(&ip->i_rw_mutex); in gfs2_qa_delete()
556 if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) { in gfs2_qa_delete()
557 kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata); in gfs2_qa_delete()
558 ip->i_qadata = NULL; in gfs2_qa_delete()
560 up_write(&ip->i_rw_mutex); in gfs2_qa_delete()
563 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_hold() argument
565 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_hold()
569 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_quota_hold()
572 if (ip->i_qadata == NULL) { in gfs2_quota_hold()
573 error = gfs2_rsqa_alloc(ip); in gfs2_quota_hold()
578 qd = ip->i_qadata->qa_qd; in gfs2_quota_hold()
580 if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) || in gfs2_quota_hold()
581 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) in gfs2_quota_hold()
582 return -EIO; in gfs2_quota_hold()
584 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); in gfs2_quota_hold()
587 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
590 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); in gfs2_quota_hold()
593 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
597 !uid_eq(uid, ip->i_inode.i_uid)) { in gfs2_quota_hold()
601 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
606 !gid_eq(gid, ip->i_inode.i_gid)) { in gfs2_quota_hold()
610 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
616 gfs2_quota_unhold(ip); in gfs2_quota_hold()
620 void gfs2_quota_unhold(struct gfs2_inode *ip) in gfs2_quota_unhold() argument
622 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unhold()
625 if (ip->i_qadata == NULL) in gfs2_quota_unhold()
627 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); in gfs2_quota_unhold()
629 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unhold()
630 qdsb_put(ip->i_qadata->qa_qd[x]); in gfs2_quota_unhold()
631 ip->i_qadata->qa_qd[x] = NULL; in gfs2_quota_unhold()
633 ip->i_qadata->qa_qd_num = 0; in gfs2_quota_unhold()
641 if (qid_lt(qd_a->qd_id, qd_b->qd_id)) in sort_qd()
642 return -1; in sort_qd()
643 if (qid_lt(qd_b->qd_id, qd_a->qd_id)) in sort_qd()
650 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in do_qc()
651 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in do_qc() local
652 struct gfs2_quota_change *qc = qd->qd_bh_qc; in do_qc()
655 mutex_lock(&sdp->sd_quota_mutex); in do_qc()
656 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); in do_qc()
658 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) { in do_qc()
659 qc->qc_change = 0; in do_qc()
660 qc->qc_flags = 0; in do_qc()
661 if (qd->qd_id.type == USRQUOTA) in do_qc()
662 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); in do_qc()
663 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); in do_qc()
666 x = be64_to_cpu(qc->qc_change) + change; in do_qc()
667 qc->qc_change = cpu_to_be64(x); in do_qc()
670 qd->qd_change = x; in do_qc()
674 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); in do_qc()
675 clear_bit(QDF_CHANGE, &qd->qd_flags); in do_qc()
676 qc->qc_flags = 0; in do_qc()
677 qc->qc_id = 0; in do_qc()
680 } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { in do_qc()
685 if (change < 0) /* Reset quiet flag if we freed some blocks */ in do_qc()
686 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); in do_qc()
687 mutex_unlock(&sdp->sd_quota_mutex); in do_qc()
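
The accumulation in do_qc() is a signed delta applied to a big-endian on-disk counter and mirrored into the in-core qd_change. A self-contained userspace model, using the glibc byte-order helpers be64toh()/htobe64(); the function name and layout are illustrative, not the kernel's:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Apply a signed change to a big-endian counter (cf. qc->qc_change)
     * and return the new native-endian value (cf. qd->qd_change). */
    static int64_t apply_change(uint64_t *counter_be, int64_t change)
    {
        int64_t x = (int64_t)be64toh(*counter_be) + change;
        *counter_be = htobe64((uint64_t)x);
        return x;
    }

    int main(void)
    {
        uint64_t qc_change = htobe64(10);
        printf("%lld\n", (long long)apply_change(&qc_change, -3)); /* 7 */
        return 0;
    }
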
690 static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, in gfs2_write_buf_to_page() argument
693 struct inode *inode = &ip->i_inode; in gfs2_write_buf_to_page()
695 struct address_space *mapping = inode->i_mapping; in gfs2_write_buf_to_page()
700 unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0; in gfs2_write_buf_to_page()
704 blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); in gfs2_write_buf_to_page()
709 return -ENOMEM; in gfs2_write_buf_to_page()
717 bh = bh->b_this_page; in gfs2_write_buf_to_page()
728 zero_user(page, bnum * bsize, bh->b_size); in gfs2_write_buf_to_page()
738 if (gfs2_is_jdata(ip)) in gfs2_write_buf_to_page()
739 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_write_buf_to_page()
741 gfs2_ordered_add_inode(ip); in gfs2_write_buf_to_page()
744 if (to_write > (bsize - boff)) { in gfs2_write_buf_to_page()
745 pg_off += (bsize - boff); in gfs2_write_buf_to_page()
746 to_write -= (bsize - boff); in gfs2_write_buf_to_page()
766 return -EIO; in gfs2_write_buf_to_page()
769 static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, in gfs2_write_disk_quota() argument
785 overflow = (pg_off + nbytes) - PAGE_SIZE; in gfs2_write_disk_quota()
789 error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr, in gfs2_write_disk_quota()
790 nbytes - overflow); in gfs2_write_disk_quota()
793 error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0, in gfs2_write_disk_quota()
794 ptr + nbytes - overflow, in gfs2_write_disk_quota()
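
gfs2_write_disk_quota() splits a quota record that straddles a page boundary: whatever overflows the first page is written at offset 0 of the next one. A minimal userspace model of just that arithmetic (all names assumed for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096L

    /* Print how a write of nbytes at pg_off splits across two pages. */
    static void split_write(long pg_off, long nbytes)
    {
        long overflow = pg_off + nbytes - PAGE_SIZE;

        if (overflow < 0)
            overflow = 0;
        printf("page 0: off=%ld len=%ld\n", pg_off, nbytes - overflow);
        if (overflow)
            printf("page 1: off=0 len=%ld\n", overflow);
    }

    int main(void)
    {
        split_write(4000, 240);   /* 96 bytes on page 0, 144 on page 1 */
        return 0;
    }
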
800 * gfs2_adjust_quota - adjust record of current block usage
801 * @ip: The quota inode
810 * Returns: 0 or -ve on error
813 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, in gfs2_adjust_quota() argument
817 struct inode *inode = &ip->i_inode; in gfs2_adjust_quota()
823 if (gfs2_is_stuffed(ip)) { in gfs2_adjust_quota()
824 err = gfs2_unstuff_dinode(ip, NULL); in gfs2_adjust_quota()
830 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); in gfs2_adjust_quota()
834 loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */ in gfs2_adjust_quota()
835 err = -EIO; in gfs2_adjust_quota()
839 qd->qd_qb.qb_value = q.qu_value; in gfs2_adjust_quota()
841 if (fdq->d_fieldmask & QC_SPC_SOFT) { in gfs2_adjust_quota()
842 q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
843 qd->qd_qb.qb_warn = q.qu_warn; in gfs2_adjust_quota()
845 if (fdq->d_fieldmask & QC_SPC_HARD) { in gfs2_adjust_quota()
846 q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
847 qd->qd_qb.qb_limit = q.qu_limit; in gfs2_adjust_quota()
849 if (fdq->d_fieldmask & QC_SPACE) { in gfs2_adjust_quota()
850 q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); in gfs2_adjust_quota()
851 qd->qd_qb.qb_value = q.qu_value; in gfs2_adjust_quota()
855 err = gfs2_write_disk_quota(ip, &q, loc); in gfs2_adjust_quota()
858 if (size > inode->i_size) in gfs2_adjust_quota()
860 inode->i_mtime = inode->i_atime = current_time(inode); in gfs2_adjust_quota()
862 set_bit(QDF_REFRESH, &qd->qd_flags); in gfs2_adjust_quota()
870 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd; in do_sync()
871 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_sync() local
879 unsigned int nalloc = 0, blocks; in do_sync() local
882 error = gfs2_rsqa_alloc(ip); in do_sync()
886 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in do_sync()
891 return -ENOMEM; in do_sync()
894 inode_lock(&ip->i_inode); in do_sync()
896 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, in do_sync()
902 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in do_sync()
908 if (gfs2_write_alloc_required(ip, offset, in do_sync()
921 * two blocks need to be updated instead of 1 */ in do_sync()
922 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; in do_sync()
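
As a worked example (hedging on constants: RES_DINODE is 1 block in current trees), syncing num_qd = 4 quota records with data_blocks = 1 each would reserve 4*1 + 1 + 4 + 3 = 12 blocks here, before the resource-group and indirect-block terms are added a few lines below.
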
926 error = gfs2_inplace_reserve(ip, &ap); in do_sync()
931 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; in do_sync()
933 error = gfs2_trans_begin(sdp, blocks, 0); in do_sync()
940 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); in do_sync()
944 do_qc(qd, -qd->qd_change_sync); in do_sync()
945 set_bit(QDF_REFRESH, &qd->qd_flags); in do_sync()
953 gfs2_inplace_release(ip); in do_sync()
957 while (qx--) in do_sync()
959 inode_unlock(&ip->i_inode); in do_sync()
961 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, in do_sync()
968 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in update_qd() local
976 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); in update_qd()
980 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in update_qd()
981 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); in update_qd()
982 qlvb->__pad = 0; in update_qd()
983 qlvb->qb_limit = q.qu_limit; in update_qd()
984 qlvb->qb_warn = q.qu_warn; in update_qd()
985 qlvb->qb_value = q.qu_value; in update_qd()
986 qd->qd_qb = *qlvb; in update_qd()
994 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in do_glock()
995 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_glock() local
1000 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); in do_glock()
1004 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) in do_glock()
1007 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in do_glock()
1009 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { in do_glock()
1011 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, in do_glock()
1016 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); in do_glock()
1039 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_lock() argument
1041 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_lock()
1046 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) in gfs2_quota_lock()
1049 error = gfs2_quota_hold(ip, uid, gid); in gfs2_quota_lock()
1053 sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num, in gfs2_quota_lock()
1056 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_lock()
1057 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_lock()
1058 error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1064 set_bit(GIF_QD_LOCKED, &ip->i_flags); in gfs2_quota_lock()
1066 while (x--) in gfs2_quota_lock()
1067 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1068 gfs2_quota_unhold(ip); in gfs2_quota_lock()
1076 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in need_sync()
1077 struct gfs2_tune *gt = &sdp->sd_tune; in need_sync()
1082 if (!qd->qd_qb.qb_limit) in need_sync()
1086 value = qd->qd_change; in need_sync()
1089 spin_lock(&gt->gt_spin); in need_sync()
1090 num = gt->gt_quota_scale_num; in need_sync()
1091 den = gt->gt_quota_scale_den; in need_sync()
1092 spin_unlock(&gt->gt_spin); in need_sync()
1096 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= in need_sync()
1097 (s64)be64_to_cpu(qd->qd_qb.qb_limit)) in need_sync()
1102 value += (s64)be64_to_cpu(qd->qd_qb.qb_value); in need_sync()
1103 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) in need_sync()
1110 void gfs2_quota_unlock(struct gfs2_inode *ip) in gfs2_quota_unlock() argument
1112 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unlock()
1118 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_unlock()
1121 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unlock()
1125 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_unlock()
1128 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_unlock()
1139 gfs2_assert_warn(sdp, qd->qd_change_sync); in gfs2_quota_unlock()
1141 clear_bit(QDF_LOCKED, &qd->qd_flags); in gfs2_quota_unlock()
1157 gfs2_quota_unhold(ip); in gfs2_quota_unlock()
1164 struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd; in print_message()
1168 (qd->qd_id.type == USRQUOTA) ? "user" : "group", in print_message()
1169 from_kqid(&init_user_ns, qd->qd_id)); in print_message()
1175 * gfs2_quota_check - check if allocating new blocks will exceed quota
1176 * @ip: The inode for which this check is being performed
1179 * @ap: The allocation parameters. ap->target contains the requested
1180 * blocks. ap->min_target, if set, contains the minimum blks
1184 * min_req = ap->min_target ? ap->min_target : ap->target;
1186 * ap->allowed is set to the number of blocks allowed
1188 * -EDQUOT otherwise, quota violation. ap->allowed is set to number
1189 * of blocks available.
1191 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, in gfs2_quota_check() argument
1194 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_check()
1200 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ in gfs2_quota_check()
1201 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_check()
1204 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) in gfs2_quota_check()
1207 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_check()
1208 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_check()
1210 if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) || in gfs2_quota_check()
1211 qid_eq(qd->qd_id, make_kqid_gid(gid)))) in gfs2_quota_check()
1214 warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); in gfs2_quota_check()
1215 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); in gfs2_quota_check()
1216 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); in gfs2_quota_check()
1218 value += qd->qd_change; in gfs2_quota_check()
1221 if (limit > 0 && (limit - value) < ap->allowed) in gfs2_quota_check()
1222 ap->allowed = limit - value; in gfs2_quota_check()
1224 if (limit && limit < (value + (s64)ap->target)) { in gfs2_quota_check()
1226 * min_target, return -EDQUOT */ in gfs2_quota_check()
1227 if (!ap->min_target || ap->min_target > ap->allowed) { in gfs2_quota_check()
1229 &qd->qd_flags)) { in gfs2_quota_check()
1231 quota_send_warning(qd->qd_id, in gfs2_quota_check()
1232 sdp->sd_vfs->s_dev, in gfs2_quota_check()
1235 error = -EDQUOT; in gfs2_quota_check()
1239 time_after_eq(jiffies, qd->qd_last_warn + in gfs2_quota_check()
1242 quota_send_warning(qd->qd_id, in gfs2_quota_check()
1243 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN); in gfs2_quota_check()
1245 qd->qd_last_warn = jiffies; in gfs2_quota_check()
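
The kernel-doc above gfs2_quota_check() pins down the contract: clamp ap->allowed to what the hard limit leaves, and fail with -EDQUOT only if even min_target will not fit. A compact userspace restatement under those rules (the struct is a hypothetical subset, not the kernel's gfs2_alloc_parms):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    struct alloc_parms {
        long target;       /* requested blocks */
        long min_target;   /* minimum acceptable, 0 if inflexible */
        long allowed;      /* out: blocks the quota permits */
    };

    /* value: blocks already charged to this ID; limit: hard limit, 0 = none. */
    static int quota_check(long value, long limit, struct alloc_parms *ap)
    {
        ap->allowed = LONG_MAX;
        if (limit > 0 && limit - value < ap->allowed)
            ap->allowed = limit - value;
        if (limit && limit < value + ap->target &&
            (!ap->min_target || ap->min_target > ap->allowed))
            return -EDQUOT;
        return 0;
    }

    int main(void)
    {
        struct alloc_parms ap = { .target = 100, .min_target = 10 };
        int err = quota_check(950, 1000, &ap);
        printf("err=%d allowed=%ld\n", err, ap.allowed); /* err=0 allowed=50 */
        return 0;
    }
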
1251 void gfs2_quota_change(struct gfs2_inode *ip, s64 change, in gfs2_quota_change() argument
1256 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_change()
1258 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON || in gfs2_quota_change()
1261 if (ip->i_diskflags & GFS2_DIF_SYSTEM) in gfs2_quota_change()
1264 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_change()
1265 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_change()
1267 if (qid_eq(qd->qd_id, make_kqid_uid(uid)) || in gfs2_quota_change()
1268 qid_eq(qd->qd_id, make_kqid_gid(gid))) { in gfs2_quota_change()
1276 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_quota_sync()
1285 return -ENOMEM; in gfs2_quota_sync()
1287 mutex_lock(&sdp->sd_quota_sync_mutex); in gfs2_quota_sync()
1288 sdp->sd_quota_sync_gen++; in gfs2_quota_sync()
1306 qda[x]->qd_sync_gen = in gfs2_quota_sync()
1307 sdp->sd_quota_sync_gen; in gfs2_quota_sync()
1314 mutex_unlock(&sdp->sd_quota_sync_mutex); in gfs2_quota_sync()
1340 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in gfs2_quota_init() local
1341 u64 size = i_size_read(sdp->sd_qc_inode); in gfs2_quota_init()
1342 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift; in gfs2_quota_init() local
1351 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20)) in gfs2_quota_init()
1352 return -EIO; in gfs2_quota_init()
1354 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; in gfs2_quota_init()
1355 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long)); in gfs2_quota_init()
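
On a 64-bit machine DIV_ROUND_UP(slots, 8 * sizeof(unsigned long)) is ceil(slots / 64), so e.g. 1000 quota slots round up to 16 unsigned longs of bitmap.
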
1357 error = -ENOMEM; in gfs2_quota_init()
1358 sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN); in gfs2_quota_init()
1359 if (sdp->sd_quota_bitmap == NULL) in gfs2_quota_init()
1360 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS | in gfs2_quota_init()
1362 if (!sdp->sd_quota_bitmap) in gfs2_quota_init()
1365 for (x = 0; x < blocks; x++) { in gfs2_quota_init()
1372 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen); in gfs2_quota_init()
1376 error = -EIO; in gfs2_quota_init()
1377 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_quota_init()
1385 qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); in gfs2_quota_init()
1386 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; in gfs2_quota_init()
1389 s64 qc_change = be64_to_cpu(qc->qc_change); in gfs2_quota_init()
1390 u32 qc_flags = be32_to_cpu(qc->qc_flags); in gfs2_quota_init()
1394 be32_to_cpu(qc->qc_id)); in gfs2_quota_init()
1406 set_bit(QDF_CHANGE, &qd->qd_flags); in gfs2_quota_init()
1407 qd->qd_change = qc_change; in gfs2_quota_init()
1408 qd->qd_slot = slot; in gfs2_quota_init()
1409 qd->qd_slot_count = 1; in gfs2_quota_init()
1412 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); in gfs2_quota_init()
1413 list_add(&qd->qd_list, &sdp->sd_quota_list); in gfs2_quota_init()
1414 atomic_inc(&sdp->sd_quota_count); in gfs2_quota_init()
1418 hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]); in gfs2_quota_init()
1426 extlen--; in gfs2_quota_init()
1441 struct list_head *head = &sdp->sd_quota_list; in gfs2_quota_cleanup()
1446 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); in gfs2_quota_cleanup()
1448 list_del(&qd->qd_list); in gfs2_quota_cleanup()
1451 list_lru_del(&gfs2_qd_lru, &qd->qd_lru); in gfs2_quota_cleanup()
1452 atomic_dec(&sdp->sd_quota_count); in gfs2_quota_cleanup()
1455 spin_lock_bucket(qd->qd_hash); in gfs2_quota_cleanup()
1456 hlist_bl_del_rcu(&qd->qd_hlist); in gfs2_quota_cleanup()
1457 spin_unlock_bucket(qd->qd_hash); in gfs2_quota_cleanup()
1459 gfs2_assert_warn(sdp, !qd->qd_change); in gfs2_quota_cleanup()
1460 gfs2_assert_warn(sdp, !qd->qd_slot_count); in gfs2_quota_cleanup()
1461 gfs2_assert_warn(sdp, !qd->qd_bh_count); in gfs2_quota_cleanup()
1463 gfs2_glock_put(qd->qd_gl); in gfs2_quota_cleanup()
1464 call_rcu(&qd->qd_rcu, gfs2_qd_dealloc); in gfs2_quota_cleanup()
1470 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); in gfs2_quota_cleanup()
1472 kvfree(sdp->sd_quota_bitmap); in gfs2_quota_cleanup()
1473 sdp->sd_quota_bitmap = NULL; in gfs2_quota_cleanup()
1478 if (error == 0 || error == -EROFS) in quotad_error()
1480 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { in quotad_error()
1482 sdp->sd_log_error = error; in quotad_error()
1483 wake_up(&sdp->sd_logd_waitq); in quotad_error()
1493 int error = fxn(sdp->sd_vfs, 0); in quotad_check_timeo()
1495 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; in quotad_check_timeo()
1497 *timeo -= t; in quotad_check_timeo()
1503 struct gfs2_inode *ip; in quotad_check_trunc_list() local
1506 ip = NULL; in quotad_check_trunc_list()
1507 spin_lock(&sdp->sd_trunc_lock); in quotad_check_trunc_list()
1508 if (!list_empty(&sdp->sd_trunc_list)) { in quotad_check_trunc_list()
1509 ip = list_entry(sdp->sd_trunc_list.next, in quotad_check_trunc_list()
1511 list_del_init(&ip->i_trunc_list); in quotad_check_trunc_list()
1513 spin_unlock(&sdp->sd_trunc_lock); in quotad_check_trunc_list()
1514 if (ip == NULL) in quotad_check_trunc_list()
1516 gfs2_glock_finish_truncate(ip); in quotad_check_trunc_list()
1521 if (!sdp->sd_statfs_force_sync) { in gfs2_wake_up_statfs()
1522 sdp->sd_statfs_force_sync = 1; in gfs2_wake_up_statfs()
1523 wake_up(&sdp->sd_quota_wait); in gfs2_wake_up_statfs()
1529 * gfs2_quotad - Write cached quota changes into the quota file
1537 struct gfs2_tune *tune = &sdp->sd_tune; in gfs2_quotad()
1547 if (sdp->sd_statfs_force_sync) { in gfs2_quotad()
1548 int error = gfs2_statfs_sync(sdp->sd_vfs, 0); in gfs2_quotad()
1555 &tune->gt_statfs_quantum); in gfs2_quotad()
1559 &quotad_timeo, &tune->gt_quota_quantum); in gfs2_quotad()
1568 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); in gfs2_quotad()
1569 spin_lock(&sdp->sd_trunc_lock); in gfs2_quotad()
1570 empty = list_empty(&sdp->sd_trunc_list); in gfs2_quotad()
1571 spin_unlock(&sdp->sd_trunc_lock); in gfs2_quotad()
1572 if (empty && !sdp->sd_statfs_force_sync) in gfs2_quotad()
1573 t -= schedule_timeout(t); in gfs2_quotad()
1576 finish_wait(&sdp->sd_quota_wait, &wait); in gfs2_quotad()
1584 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_quota_get_state()
1588 switch (sdp->sd_args.ar_quota) { in gfs2_quota_get_state()
1590 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; in gfs2_quota_get_state()
1591 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; in gfs2_quota_get_state()
1594 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED | in gfs2_quota_get_state()
1596 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED | in gfs2_quota_get_state()
1602 if (sdp->sd_quota_inode) { in gfs2_quota_get_state()
1603 state->s_state[USRQUOTA].ino = in gfs2_quota_get_state()
1604 GFS2_I(sdp->sd_quota_inode)->i_no_addr; in gfs2_quota_get_state()
1605 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks; in gfs2_quota_get_state()
1607 state->s_state[USRQUOTA].nextents = 1; /* unsupported */ in gfs2_quota_get_state()
1608 state->s_state[GRPQUOTA] = state->s_state[USRQUOTA]; in gfs2_quota_get_state()
1609 state->s_incoredqs = list_lru_count(&gfs2_qd_lru); in gfs2_quota_get_state()
1616 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_get_dqblk()
1624 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_get_dqblk()
1625 return -ESRCH; /* Crazy XFS error code */ in gfs2_get_dqblk()
1629 return -EINVAL; in gfs2_get_dqblk()
1638 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; in gfs2_get_dqblk()
1639 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
1640 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
1641 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; in gfs2_get_dqblk()
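
For example, with sb_bsize_shift = 12 (4 KiB filesystem blocks), a qb_limit of 25600 blocks is reported to the quota tools as 25600 << 12 = 104857600 bytes, i.e. 100 MiB.
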
1655 struct gfs2_sbd *sdp = sb->s_fs_info; in gfs2_set_dqblk()
1656 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_set_dqblk() local
1660 unsigned int blocks = 0; in gfs2_set_dqblk() local
1665 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) in gfs2_set_dqblk()
1666 return -ESRCH; /* Crazy XFS error code */ in gfs2_set_dqblk()
1670 return -EINVAL; in gfs2_set_dqblk()
1672 if (fdq->d_fieldmask & ~GFS2_FIELDMASK) in gfs2_set_dqblk()
1673 return -EINVAL; in gfs2_set_dqblk()
1679 error = gfs2_rsqa_alloc(ip); in gfs2_set_dqblk()
1683 inode_lock(&ip->i_inode); in gfs2_set_dqblk()
1684 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh); in gfs2_set_dqblk()
1687 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in gfs2_set_dqblk()
1691 /* Check for existing entry, if none then alloc new blocks */ in gfs2_set_dqblk()
1696 /* If nothing has changed, this is a no-op */ in gfs2_set_dqblk()
1697 if ((fdq->d_fieldmask & QC_SPC_SOFT) && in gfs2_set_dqblk()
1698 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) in gfs2_set_dqblk()
1699 fdq->d_fieldmask ^= QC_SPC_SOFT; in gfs2_set_dqblk()
1701 if ((fdq->d_fieldmask & QC_SPC_HARD) && in gfs2_set_dqblk()
1702 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) in gfs2_set_dqblk()
1703 fdq->d_fieldmask ^= QC_SPC_HARD; in gfs2_set_dqblk()
1705 if ((fdq->d_fieldmask & QC_SPACE) && in gfs2_set_dqblk()
1706 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) in gfs2_set_dqblk()
1707 fdq->d_fieldmask ^= QC_SPACE; in gfs2_set_dqblk()
1709 if (fdq->d_fieldmask == 0) in gfs2_set_dqblk()
1713 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); in gfs2_set_dqblk()
1714 if (gfs2_is_stuffed(ip)) in gfs2_set_dqblk()
1718 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in gfs2_set_dqblk()
1720 blocks = 1 + data_blocks + ind_blocks; in gfs2_set_dqblk()
1721 ap.target = blocks; in gfs2_set_dqblk()
1722 error = gfs2_inplace_reserve(ip, &ap); in gfs2_set_dqblk()
1725 blocks += gfs2_rg_blocks(ip, blocks); in gfs2_set_dqblk()
1728 /* Some quotas span block boundaries and can update two blocks, in gfs2_set_dqblk()
1730 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0); in gfs2_set_dqblk()
1735 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); in gfs2_set_dqblk()
1737 clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); in gfs2_set_dqblk()
1742 gfs2_inplace_release(ip); in gfs2_set_dqblk()
1748 inode_unlock(&ip->i_inode); in gfs2_set_dqblk()