Lines Matching +full:depth +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
25 * Maximum quota tree depth we support. Only to limit recursion when working
32 static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) in __get_index() argument
34 unsigned int epb = info->dqi_usable_bs >> 2; in __get_index()
36 depth = info->dqi_qtree_depth - depth - 1; in __get_index()
37 while (depth--) in __get_index()
42 static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth) in get_index() argument
46 return __get_index(info, id, depth); in get_index()
52 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) in qtree_dqstr_in_blk()
53 / info->dqi_entry_size; in qtree_dqstr_in_blk()
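
__get_index() above treats the quota id as a number written in base epb, where epb is the count of 32-bit block references that fit in the usable part of a tree block, and returns the digit selecting the slot at the given tree level; qtree_dqstr_in_blk() is the companion calculation for leaf data blocks. The userspace sketch below reproduces only that index arithmetic under assumed example sizes (1024-byte usable blocks, a 4-level tree); it is illustrative and not the kernel API.

/*
 * Illustrative userspace sketch of the arithmetic in __get_index():
 * the per-level slot index is one base-epb "digit" of the quota id.
 * epb and the tree depth below are assumed example values.
 */
#include <stdio.h>

static unsigned int qtree_slot(unsigned int id, unsigned int epb,
                               int tree_depth, int depth)
{
        int shift = tree_depth - depth - 1;

        while (shift--)
                id /= epb;              /* drop digits for the lower levels */
        return id % epb;                /* keep the digit for this level */
}

int main(void)
{
        unsigned int epb = 1024 / 4;    /* 256 references per 1 KiB tree block */

        for (int d = 0; d < 4; d++)     /* id 70000 in a 4-level tree */
                printf("depth %d -> slot %u\n", d, qtree_slot(70000, epb, 4, d));
        return 0;
}
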
58 struct super_block *sb = info->dqi_sb; in read_blk()
60 memset(buf, 0, info->dqi_usable_bs); in read_blk()
61 return sb->s_op->quota_read(sb, info->dqi_type, buf, in read_blk()
62 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); in read_blk()
67 struct super_block *sb = info->dqi_sb; in write_blk()
70 ret = sb->s_op->quota_write(sb, info->dqi_type, buf, in write_blk()
71 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); in write_blk()
72 if (ret != info->dqi_usable_bs) { in write_blk()
75 ret = -EIO; in write_blk()
84 quota_error(sb, "Getting %s %u out of range %u-%u", in do_check_range()
86 return -EUCLEAN; in do_check_range()
97 err = do_check_range(info->dqi_sb, "dqdh_next_free", in check_dquot_block_header()
98 le32_to_cpu(dh->dqdh_next_free), 0, in check_dquot_block_header()
99 info->dqi_blocks - 1); in check_dquot_block_header()
102 err = do_check_range(info->dqi_sb, "dqdh_prev_free", in check_dquot_block_header()
103 le32_to_cpu(dh->dqdh_prev_free), 0, in check_dquot_block_header()
104 info->dqi_blocks - 1); in check_dquot_block_header()
107 err = do_check_range(info->dqi_sb, "dqdh_entries", in check_dquot_block_header()
108 le16_to_cpu(dh->dqdh_entries), 0, in check_dquot_block_header()
117 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in get_free_dqblk()
122 return -ENOMEM; in get_free_dqblk()
123 if (info->dqi_free_blk) { in get_free_dqblk()
124 blk = info->dqi_free_blk; in get_free_dqblk()
131 info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); in get_free_dqblk()
134 memset(buf, 0, info->dqi_usable_bs); in get_free_dqblk()
136 ret = write_blk(info, info->dqi_blocks, buf); in get_free_dqblk()
139 blk = info->dqi_blocks++; in get_free_dqblk()
141 mark_info_dirty(info->dqi_sb, info->dqi_type); in get_free_dqblk()
154 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); in put_free_dqblk()
155 dh->dqdh_prev_free = cpu_to_le32(0); in put_free_dqblk()
156 dh->dqdh_entries = cpu_to_le16(0); in put_free_dqblk()
160 info->dqi_free_blk = blk; in put_free_dqblk()
161 mark_info_dirty(info->dqi_sb, info->dqi_type); in put_free_dqblk()
169 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in remove_free_dqentry()
171 uint nextblk = le32_to_cpu(dh->dqdh_next_free); in remove_free_dqentry()
172 uint prevblk = le32_to_cpu(dh->dqdh_prev_free); in remove_free_dqentry()
176 return -ENOMEM; in remove_free_dqentry()
181 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = in remove_free_dqentry()
182 dh->dqdh_prev_free; in remove_free_dqentry()
191 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = in remove_free_dqentry()
192 dh->dqdh_next_free; in remove_free_dqentry()
197 info->dqi_free_entry = nextblk; in remove_free_dqentry()
198 mark_info_dirty(info->dqi_sb, info->dqi_type); in remove_free_dqentry()
201 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); in remove_free_dqentry()
204 quota_error(info->dqi_sb, "Can't write block (%u) " in remove_free_dqentry()
216 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in insert_free_dqentry()
221 return -ENOMEM; in insert_free_dqentry()
222 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); in insert_free_dqentry()
223 dh->dqdh_prev_free = cpu_to_le32(0); in insert_free_dqentry()
227 if (info->dqi_free_entry) { in insert_free_dqentry()
228 err = read_blk(info, info->dqi_free_entry, tmpbuf); in insert_free_dqentry()
231 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = in insert_free_dqentry()
233 err = write_blk(info, info->dqi_free_entry, tmpbuf); in insert_free_dqentry()
238 info->dqi_free_entry = blk; in insert_free_dqentry()
239 mark_info_dirty(info->dqi_sb, info->dqi_type); in insert_free_dqentry()
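
The four helpers above manage space inside the quota file by threading lists through the dqdh_next_free/dqdh_prev_free fields of the on-disk block headers: get_free_dqblk()/put_free_dqblk() recycle completely free blocks via info->dqi_free_blk, while insert_free_dqentry()/remove_free_dqentry() keep a doubly linked list, headed by info->dqi_free_entry, of data blocks that still have room for another entry. The sketch below models only the doubly linked variant in userspace, with an array standing in for on-disk blocks and block number 0 meaning "no block" as in the format above; the names and layout are simplifications, not the kernel structures.

/*
 * Illustrative userspace model of the "blocks with free entries" list:
 * a doubly linked list threaded through per-block headers kept in an
 * array indexed by block number. Block number 0 means "no block".
 */
#include <stdio.h>

#define NBLOCKS 8

struct hdr {
        unsigned int next_free;
        unsigned int prev_free;
};

static struct hdr blocks[NBLOCKS];
static unsigned int free_entry;         /* head of the list, 0 == empty */

static void insert_free_entry(unsigned int blk)
{
        blocks[blk].next_free = free_entry;
        blocks[blk].prev_free = 0;
        if (free_entry)
                blocks[free_entry].prev_free = blk;
        free_entry = blk;               /* new block becomes the head */
}

static void remove_free_entry(unsigned int blk)
{
        unsigned int next = blocks[blk].next_free;
        unsigned int prev = blocks[blk].prev_free;

        if (next)
                blocks[next].prev_free = prev;
        if (prev)
                blocks[prev].next_free = next;
        else
                free_entry = next;      /* blk was the head of the list */
        blocks[blk].next_free = blocks[blk].prev_free = 0;
}

int main(void)
{
        insert_free_entry(3);
        insert_free_entry(5);
        remove_free_entry(3);
        printf("head %u, next %u\n", free_entry, blocks[free_entry].next_free);
        return 0;
}
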
251 for (i = 0; i < info->dqi_entry_size; i++) in qtree_entry_unused()
264 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in find_free_dqentry()
269 *err = -ENOMEM; in find_free_dqentry()
273 if (info->dqi_free_entry) { in find_free_dqentry()
274 blk = info->dqi_free_entry; in find_free_dqentry()
288 memset(buf, 0, info->dqi_usable_bs); in find_free_dqentry()
291 info->dqi_free_entry = blk; in find_free_dqentry()
292 mark_info_dirty(dquot->dq_sb, dquot->dq_id.type); in find_free_dqentry()
295 if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { in find_free_dqentry()
298 quota_error(dquot->dq_sb, "Can't remove block (%u) " in find_free_dqentry()
303 le16_add_cpu(&dh->dqdh_entries, 1); in find_free_dqentry()
309 ddquot += info->dqi_entry_size; in find_free_dqentry()
313 quota_error(dquot->dq_sb, "Data block full but it shouldn't"); in find_free_dqentry()
314 *err = -EIO; in find_free_dqentry()
320 quota_error(dquot->dq_sb, "Can't write quota data block %u", in find_free_dqentry()
324 dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + in find_free_dqentry()
326 i * info->dqi_entry_size; in find_free_dqentry()
336 uint *blks, int depth) in do_insert_tree() argument
338 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in do_insert_tree()
345 return -ENOMEM; in do_insert_tree()
346 if (!blks[depth]) { in do_insert_tree()
350 for (i = 0; i < depth; i++) in do_insert_tree()
352 quota_error(dquot->dq_sb, in do_insert_tree()
355 ret = -EIO; in do_insert_tree()
358 blks[depth] = ret; in do_insert_tree()
359 memset(buf, 0, info->dqi_usable_bs); in do_insert_tree()
362 ret = read_blk(info, blks[depth], buf); in do_insert_tree()
364 quota_error(dquot->dq_sb, "Can't read tree quota " in do_insert_tree()
365 "block %u", blks[depth]); in do_insert_tree()
370 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); in do_insert_tree()
371 ret = do_check_range(dquot->dq_sb, "block", newblk, 0, in do_insert_tree()
372 info->dqi_blocks - 1); in do_insert_tree()
378 for (i = 0; i <= depth; i++) in do_insert_tree()
380 quota_error(dquot->dq_sb, in do_insert_tree()
382 blks[depth], in do_insert_tree()
383 get_index(info, dquot->dq_id, depth)); in do_insert_tree()
384 ret = -EIO; in do_insert_tree()
388 blks[depth + 1] = newblk; in do_insert_tree()
389 if (depth == info->dqi_qtree_depth - 1) { in do_insert_tree()
392 quota_error(dquot->dq_sb, "Inserting already present " in do_insert_tree()
395 dquot->dq_id, depth)])); in do_insert_tree()
396 ret = -EIO; in do_insert_tree()
400 blks[depth + 1] = find_free_dqentry(info, dquot, &ret); in do_insert_tree()
402 ret = do_insert_tree(info, dquot, blks, depth + 1); in do_insert_tree()
405 ref[get_index(info, dquot->dq_id, depth)] = in do_insert_tree()
406 cpu_to_le32(blks[depth + 1]); in do_insert_tree()
407 ret = write_blk(info, blks[depth], buf); in do_insert_tree()
409 put_free_dqblk(info, buf, blks[depth]); in do_insert_tree()
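
do_insert_tree() records every block it visits in blks[depth] as it walks down, and before following a reference it checks both that the target block number is in range and that it does not match any block already on the path (the for (i = 0; i <= depth; i++) loop), since a corrupted quota file could otherwise send the walk into a reference loop; the same check appears in remove_tree() and find_tree_dqentry() below. A minimal standalone sketch of that path check, using a hypothetical helper name:

/*
 * Sketch of the loop-detection check used during the tree walk: the block
 * about to be entered is compared against every block already on the path
 * (blks[0..depth]). Illustrative helper, not a kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static bool on_path(const unsigned int *blks, int depth, unsigned int newblk)
{
        for (int i = 0; i <= depth; i++)
                if (blks[i] == newblk)
                        return true;    /* reference would loop back into the path */
        return false;
}

int main(void)
{
        unsigned int blks[] = { 1, 7, 42 };     /* root .. current block */

        printf("%d %d\n", on_path(blks, 2, 42), on_path(blks, 2, 43));
        return 0;
}
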
423 if (info->dqi_blocks <= QT_TREEOFF) { in dq_insert_tree()
424 quota_error(dquot->dq_sb, "Quota tree root isn't allocated!"); in dq_insert_tree()
425 return -EIO; in dq_insert_tree()
428 if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { in dq_insert_tree()
429 quota_error(dquot->dq_sb, "Quota tree depth too big!"); in dq_insert_tree()
430 return -EIO; in dq_insert_tree()
441 int type = dquot->dq_id.type; in qtree_write_dquot()
442 struct super_block *sb = dquot->dq_sb; in qtree_write_dquot()
444 char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); in qtree_write_dquot()
447 return -ENOMEM; in qtree_write_dquot()
450 if (!dquot->dq_off) { in qtree_write_dquot()
459 spin_lock(&dquot->dq_dqb_lock); in qtree_write_dquot()
460 info->dqi_ops->mem2disk_dqblk(ddquot, dquot); in qtree_write_dquot()
461 spin_unlock(&dquot->dq_dqb_lock); in qtree_write_dquot()
462 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size, in qtree_write_dquot()
463 dquot->dq_off); in qtree_write_dquot()
464 if (ret != info->dqi_entry_size) { in qtree_write_dquot()
467 ret = -ENOSPC; in qtree_write_dquot()
483 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in free_dqentry()
487 return -ENOMEM; in free_dqentry()
488 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { in free_dqentry()
489 quota_error(dquot->dq_sb, "Quota structure has offset to " in free_dqentry()
491 (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); in free_dqentry()
492 ret = -EIO; in free_dqentry()
497 quota_error(dquot->dq_sb, "Can't read quota data block %u", in free_dqentry()
505 le16_add_cpu(&dh->dqdh_entries, -1); in free_dqentry()
506 if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ in free_dqentry()
511 quota_error(dquot->dq_sb, "Can't move quota data block " in free_dqentry()
517 (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), in free_dqentry()
518 0, info->dqi_entry_size); in free_dqentry()
519 if (le16_to_cpu(dh->dqdh_entries) == in free_dqentry()
520 qtree_dqstr_in_blk(info) - 1) { in free_dqentry()
524 quota_error(dquot->dq_sb, "Can't insert quota " in free_dqentry()
531 quota_error(dquot->dq_sb, "Can't write quota " in free_dqentry()
537 dquot->dq_off = 0; /* Quota is now unattached */ in free_dqentry()
545 uint *blks, int depth) in remove_tree() argument
547 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in remove_tree()
554 return -ENOMEM; in remove_tree()
555 ret = read_blk(info, blks[depth], buf); in remove_tree()
557 quota_error(dquot->dq_sb, "Can't read quota data block %u", in remove_tree()
558 blks[depth]); in remove_tree()
561 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); in remove_tree()
562 ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF, in remove_tree()
563 info->dqi_blocks - 1); in remove_tree()
567 for (i = 0; i <= depth; i++) in remove_tree()
569 quota_error(dquot->dq_sb, in remove_tree()
571 blks[depth], in remove_tree()
572 get_index(info, dquot->dq_id, depth)); in remove_tree()
573 ret = -EIO; in remove_tree()
576 if (depth == info->dqi_qtree_depth - 1) { in remove_tree()
578 blks[depth + 1] = 0; in remove_tree()
580 blks[depth + 1] = newblk; in remove_tree()
581 ret = remove_tree(info, dquot, blks, depth + 1); in remove_tree()
583 if (ret >= 0 && !blks[depth + 1]) { in remove_tree()
584 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); in remove_tree()
586 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) in remove_tree()
589 if (i == (info->dqi_usable_bs >> 2) in remove_tree()
590 && blks[depth] != QT_TREEOFF) { in remove_tree()
591 put_free_dqblk(info, buf, blks[depth]); in remove_tree()
592 blks[depth] = 0; in remove_tree()
594 ret = write_blk(info, blks[depth], buf); in remove_tree()
596 quota_error(dquot->dq_sb, in remove_tree()
598 blks[depth]); in remove_tree()
611 if (!dquot->dq_off) /* Even not allocated? */ in qtree_delete_dquot()
613 if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { in qtree_delete_dquot()
614 quota_error(dquot->dq_sb, "Quota tree depth too big!"); in qtree_delete_dquot()
615 return -EIO; in qtree_delete_dquot()
625 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in find_block_dqentry()
631 return -ENOMEM; in find_block_dqentry()
634 quota_error(dquot->dq_sb, "Can't read quota tree " in find_block_dqentry()
640 if (info->dqi_ops->is_id(ddquot, dquot)) in find_block_dqentry()
642 ddquot += info->dqi_entry_size; in find_block_dqentry()
645 quota_error(dquot->dq_sb, in find_block_dqentry()
647 from_kqid(&init_user_ns, dquot->dq_id)); in find_block_dqentry()
648 ret = -EIO; in find_block_dqentry()
651 ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct in find_block_dqentry()
652 qt_disk_dqdbheader) + i * info->dqi_entry_size; in find_block_dqentry()
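
The value computed at the end of find_block_dqentry() (and the dq_off assignment in find_free_dqentry() above) is the byte offset of a dquot entry in the quota file: the data block number shifted by the block-size bits, plus the data-block header, plus the entry index times the entry size. The worked example below plugs in assumed sizes (1024-byte blocks, a 16-byte header, 72-byte entries) purely for illustration; the kernel takes the real values from qtree_mem_dqinfo.

/*
 * Worked example of the entry-offset formula above. The sizes are
 * illustrative assumptions, not read from a real quota file.
 */
#include <stdio.h>

int main(void)
{
        unsigned int blocksize_bits = 10;       /* 1 << 10 = 1024-byte blocks */
        unsigned int header_size = 16;          /* assumed data-block header  */
        unsigned int entry_size = 72;           /* assumed on-disk entry size */
        unsigned int blk = 5, i = 3;            /* 4th entry in block 5       */

        long long off = ((long long)blk << blocksize_bits)
                        + header_size + i * entry_size;

        printf("entry offset = %lld\n", off);   /* 5*1024 + 16 + 3*72 = 5352 */
        return 0;
}
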
661 struct dquot *dquot, uint *blks, int depth) in find_tree_dqentry() argument
663 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in find_tree_dqentry()
670 return -ENOMEM; in find_tree_dqentry()
671 ret = read_blk(info, blks[depth], buf); in find_tree_dqentry()
673 quota_error(dquot->dq_sb, "Can't read quota tree block %u", in find_tree_dqentry()
674 blks[depth]); in find_tree_dqentry()
678 blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); in find_tree_dqentry()
681 ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF, in find_tree_dqentry()
682 info->dqi_blocks - 1); in find_tree_dqentry()
687 for (i = 0; i <= depth; i++) in find_tree_dqentry()
689 quota_error(dquot->dq_sb, in find_tree_dqentry()
691 blks[depth], in find_tree_dqentry()
692 get_index(info, dquot->dq_id, depth)); in find_tree_dqentry()
693 ret = -EIO; in find_tree_dqentry()
696 blks[depth + 1] = blk; in find_tree_dqentry()
697 if (depth < info->dqi_qtree_depth - 1) in find_tree_dqentry()
698 ret = find_tree_dqentry(info, dquot, blks, depth + 1); in find_tree_dqentry()
706 /* Find entry for given id in the tree - wrapper function */
712 if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { in find_dqentry()
713 quota_error(dquot->dq_sb, "Quota tree depth too big!"); in find_dqentry()
714 return -EIO; in find_dqentry()
721 int type = dquot->dq_id.type; in qtree_read_dquot()
722 struct super_block *sb = dquot->dq_sb; in qtree_read_dquot()
729 if (!sb_dqopt(dquot->dq_sb)->files[type]) { in qtree_read_dquot()
731 return -EIO; in qtree_read_dquot()
735 if (!dquot->dq_off) { in qtree_read_dquot()
742 dquot->dq_id)); in qtree_read_dquot()
743 dquot->dq_off = 0; in qtree_read_dquot()
744 set_bit(DQ_FAKE_B, &dquot->dq_flags); in qtree_read_dquot()
745 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); in qtree_read_dquot()
749 dquot->dq_off = offset; in qtree_read_dquot()
751 ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); in qtree_read_dquot()
753 return -ENOMEM; in qtree_read_dquot()
754 ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, in qtree_read_dquot()
755 dquot->dq_off); in qtree_read_dquot()
756 if (ret != info->dqi_entry_size) { in qtree_read_dquot()
758 ret = -EIO; in qtree_read_dquot()
760 from_kqid(&init_user_ns, dquot->dq_id)); in qtree_read_dquot()
761 set_bit(DQ_FAKE_B, &dquot->dq_flags); in qtree_read_dquot()
762 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); in qtree_read_dquot()
766 spin_lock(&dquot->dq_dqb_lock); in qtree_read_dquot()
767 info->dqi_ops->disk2mem_dqblk(dquot, ddquot); in qtree_read_dquot()
768 if (!dquot->dq_dqb.dqb_bhardlimit && in qtree_read_dquot()
769 !dquot->dq_dqb.dqb_bsoftlimit && in qtree_read_dquot()
770 !dquot->dq_dqb.dqb_ihardlimit && in qtree_read_dquot()
771 !dquot->dq_dqb.dqb_isoftlimit) in qtree_read_dquot()
772 set_bit(DQ_FAKE_B, &dquot->dq_flags); in qtree_read_dquot()
773 spin_unlock(&dquot->dq_dqb_lock); in qtree_read_dquot()
785 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && in qtree_release_dquot()
786 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) in qtree_release_dquot()
793 unsigned int blk, int depth) in find_next_id() argument
795 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); in find_next_id()
798 unsigned int epb = info->dqi_usable_bs >> 2; in find_next_id()
803 return -ENOMEM; in find_next_id()
805 for (i = depth; i < info->dqi_qtree_depth - 1; i++) in find_next_id()
810 quota_error(info->dqi_sb, in find_next_id()
814 for (i = __get_index(info, *id, depth); i < epb; i++) { in find_next_id()
821 ret = do_check_range(info->dqi_sb, "block", blk_no, 0, in find_next_id()
822 info->dqi_blocks - 1); in find_next_id()
825 if (depth == info->dqi_qtree_depth - 1) { in find_next_id()
829 ret = find_next_id(info, id, blk_no, depth + 1); in find_next_id()
830 if (ret != -ENOENT) in find_next_id()
834 ret = -ENOENT; in find_next_id()
850 *qid = make_kqid(&init_user_ns, qid->type, id); in qtree_get_next_id()
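
find_next_id(), called from qtree_get_next_id() for the Q_GETNEXTQUOTA path, scans each reference block starting at __get_index(info, *id, depth) and either recurses into a populated subtree or skips ahead to the first id covered by the next slot. The loop at its top (for (i = depth; i < info->dqi_qtree_depth - 1; i++)) builds that per-slot stride, which works out to epb multiplied once per remaining level; the sketch below reproduces only this stride calculation, with assumed example values.

/*
 * Sketch of the id stride used when scanning for the next allocated id:
 * one reference slot at a given depth covers epb^(tree_depth - depth - 1)
 * consecutive ids, so skipping an empty slot advances the candidate id by
 * that amount. epb and tree depth below are illustrative.
 */
#include <stdio.h>

static unsigned long long ids_per_slot(unsigned int epb,
                                       unsigned int tree_depth,
                                       unsigned int depth)
{
        unsigned long long inc = 1;

        for (unsigned int i = depth; i < tree_depth - 1; i++)
                inc *= epb;             /* one factor per remaining level */
        return inc;
}

int main(void)
{
        unsigned int epb = 256;         /* e.g. 1024-byte block / 4-byte refs */

        for (unsigned int d = 0; d < 4; d++)
                printf("depth %u: one slot spans %llu ids\n",
                       d, ids_per_slot(epb, 4, d));
        return 0;
}
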