// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
{
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return BTRFS_QGROUP_MODE_DISABLED;
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
		return BTRFS_QGROUP_MODE_SIMPLE;
	return BTRFS_QGROUP_MODE_FULL;
}

bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}

bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

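/*
 * Account @num_bytes as reserved for @qgroup under the given reservation
 * @type (data, meta prealloc or meta pertrans).
 */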
static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

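/*
 * Release @num_bytes of @type reservation from @qgroup.  An underflow is
 * clamped to zero and, on debug builds, also triggers a ratelimited warning.
 */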
static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     const struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 const struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

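/*
 * The old/new refcnts are reset lazily: rather than zeroing them before every
 * accounting run, a per-run sequence number is used as the zero point.  Any
 * refcnt below @seq is stale and counts as zero, which is what the getters
 * below implement.
 */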
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/*
 * Add qgroup to the filesystem's qgroup tree.
 *
 * Must be called with qgroup_lock held and @prealloc preallocated.
 *
 * Ownership of @prealloc is transferred to this function, so the caller
 * must not touch @prealloc afterwards.
 */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  struct btrfs_qgroup *prealloc,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	/* Caller must have pre-allocated @prealloc. */
	ASSERT(prealloc);

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid) {
			p = &(*p)->rb_left;
		} else if (qgroup->qgroupid > qgroupid) {
			p = &(*p)->rb_right;
		} else {
			kfree(prealloc);
			return qgroup;
		}
	}

	qgroup = prealloc;
	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);
	INIT_LIST_HEAD(&qgroup->iterator);
	INIT_LIST_HEAD(&qgroup->nested_iterator);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/*
 * Add the relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held.  Ownership of @prealloc is
 * transferred to this function, so the caller must not touch it anymore.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
			     struct btrfs_qgroup *member,
			     struct btrfs_qgroup *parent)
{
	if (!member || !parent) {
		kfree(prealloc);
		return -ENOENT;
	}

	prealloc->group = parent;
	prealloc->member = member;
	list_add_tail(&prealloc->next_group, &member->groups);
	list_add_tail(&prealloc->next_member, &parent->members);

	return 0;
}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup_list *prealloc,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(prealloc, member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return;
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}

static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf, int slot,
				   struct btrfs_qgroup_status_item *ptr)
{
	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
}

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_root)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
				qgroup_read_enable_gen(fs_info, l, slot, ptr);
			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
				qgroup_mark_inconsistent(fs_info);
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			qgroup_mark_inconsistent(fs_info);
		}
		if (!qgroup) {
			struct btrfs_qgroup *prealloc;
			struct btrfs_root *tree_root = fs_info->tree_root;

			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
			if (!prealloc) {
				ret = -ENOMEM;
				goto out;
			}
			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			/*
			 * If a qgroup exists for a subvolume ID, it is possible
			 * that subvolume has been deleted, in which case
			 * re-using that ID would lead to incorrect accounting.
			 *
			 * Ensure that we skip any such subvol ids.
			 *
			 * We don't need to lock because this is only called
			 * during mount before we start doing things like creating
			 * subvolumes.
			 */
			if (is_fstree(qgroup->qgroupid) &&
			    qgroup->qgroupid > tree_root->free_objectid)
				/*
				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
				 * as it will get checked on the next call to
				 * btrfs_get_free_objectid.
				 */
				tree_root->free_objectid = qgroup->qgroupid + 1;
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		struct btrfs_qgroup_list *list = NULL;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		list = kzalloc(sizeof(*list), GFP_KERNEL);
		if (!list) {
			ret = -ENOMEM;
			goto out;
		}
		ret = add_relation_rb(fs_info, list, found_key.objectid,
				      found_key.offset);
		list = NULL;
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (ret >= 0) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	} else {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies that
 * we don't leak any reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab the
	 * qgroup lock.  And here we don't go post-order, so that the result
	 * is sorted and more user friendly.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable();
 * the first two are single-threaded paths.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	/*
	 * btrfs_quota_disable() can be called concurrently with
	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
	 * lock.
	 */
	spin_lock(&fs_info->qgroup_lock);
	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
		spin_unlock(&fs_info->qgroup_lock);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
		spin_lock(&fs_info->qgroup_lock);
	}
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to NULL
	 * here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

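/*
 * Insert one relation item (key: src, BTRFS_QGROUP_RELATION_KEY, dst) into
 * the quota tree.  Relations are stored in both directions, so callers add
 * one item for (src, dst) and another for (dst, src).
 */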
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

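/*
 * Create the on-disk INFO and LIMIT items for a new qgroup, with all
 * counters and limits zeroed.
 */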
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

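/* Delete the on-disk INFO and LIMIT items of a qgroup from the quota tree. */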
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

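/* Write the in-memory limit configuration of @qgroup back to its LIMIT item. */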
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

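/* Write the in-memory counters of @qgroup back to its INFO item. */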
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

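/*
 * Persist the global qgroup state (flags, generation and rescan progress)
 * into the status item of the quota tree.
 */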
static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Called with qgroup_ioctl_lock held (tree operations below can sleep, so
 * the spinning qgroup_lock cannot be held here).
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree is
		 * going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_qgroup *prealloc = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for the quota root item
	 * 1 for the BTRFS_QGROUP_STATUS item
	 *
	 * We also need 2*n items for the QGROUP_INFO and QGROUP_LIMIT items
	 * of each subvolume, but those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
	if (simple) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
	} else {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			/* We should not have a stray @prealloc pointer. */
			ASSERT(prealloc == NULL);
			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
			if (!prealloc) {
				ret = -ENOMEM;
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			prealloc = NULL;
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ASSERT(prealloc == NULL);
	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out_free_path;
	}
	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
	prealloc = NULL;
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	fs_info->qgroup_enable_gen = trans->transid;

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations, such
	 * as adding/removing qgroups or adding/deleting qgroup relations for
	 * example, because all qgroup operations first start or join a
	 * transaction and then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas, by calling
	 * this function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	/* Skip rescan for simple qgroups. */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		goto out_free_path;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS. That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker. Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error, and any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	kfree(prealloc);
	return ret;
}

/*
 * It is possible to have outstanding ordered extents which reserved bytes
 * before we disabled. We need to fully flush delalloc and ordered extents,
 * and then commit, to ensure that we don't leak such reservations, only to
 * have them come back if we re-enable:
 *
 * - enable simple quotas
 * - reserve space
 * - release it, store rsv_bytes in OE
 * - disable quotas
 * - enable simple quotas (qgroup rsv are all 0)
 * - OE finishes
 * - run delayed refs
 * - free rsv_bytes, resulting in miscounting or even underflow
 */
static int flush_reservations(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret)
		return ret;
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	return btrfs_commit_current_transaction(fs_info->tree_root);
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked to prevent races with
	 * snapshot creation.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	/*
	 * Relocation will mess with backrefs, so make sure we have the
	 * cleaner_mutex held to protect us from relocation.
	 */
	lockdep_assert_held(&fs_info->cleaner_mutex);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request the qgroup rescan worker to complete and wait for it. This
	 * wait must be done before the transaction start for quota disable,
	 * since it may deadlock with the transaction used by the qgroup
	 * rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/*
	 * We have nothing held here and no trans handle, just return the error
	 * if there is one and set back the quota enabled bit since we didn't
	 * actually disable quotas.
	 */
	ret = flush_reservations(fs_info);
	if (ret) {
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		return ret;
	}

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	spin_lock(&fs_info->trans_lock);
	list_del(&quota_root->dirty_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_tree_lock(quota_root->node);
	btrfs_clear_buffer_dirty(trans, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
				    quota_root->node, 0, 1);

	if (ret < 0)
		btrfs_abort_transaction(trans, ret);

out:
	btrfs_put_root(quota_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

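/*
 * Mark a qgroup as dirty by linking it into fs_info->dirty_qgroups, so that
 * its on-disk items get updated later in the transaction.  No-op if the
 * qgroup is already on the dirty list.
 */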
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

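/*
 * Iterator helpers: walk a set of qgroups without memory allocations by
 * linking them through the iterator list head embedded in each qgroup.  The
 * list_empty() check makes additions idempotent, so each qgroup is visited
 * at most once even if it is added multiple times.
 */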
static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->iterator))
		return;

	list_add_tail(&qgroup->iterator, head);
}

static void qgroup_iterator_clean(struct list_head *head)
{
	while (!list_empty(head)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
		list_del_init(&qgroup->iterator);
	}
}

/*
 * The easy accounting case: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all of the child's exclusive extents are also exclusive for
 * the parent, so excl/rfer just get added/removed.
 *
 * The same goes for qgroup reservation space, which must also be added to or
 * removed from the parent.  Otherwise, when the child releases reservation
 * space, the parent would underflow its reservation (in the relation-adding
 * case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	LIST_HEAD(qgroup_list);
	u64 num_bytes = src->excl;
	u64 num_bytes_cmpr = src->excl_cmpr;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup_iterator_add(&qgroup_list, qgroup);
	list_for_each_entry(qgroup, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes_cmpr;

		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
		qgroup->excl += sign * num_bytes;
		qgroup->excl_cmpr += sign * num_bytes_cmpr;

		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup_dirty(fs_info, qgroup);

		/* Append parent qgroups to @qgroup_list. */
		list_for_each_entry(glist, &qgroup->groups, next_group)
			qgroup_iterator_add(&qgroup_list, glist->group);
	}
	ret = 0;
out:
	qgroup_iterator_clean(&qgroup_list);
	return ret;
}

/*
 * Quick path for updating qgroups with only exclusive refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update; return >0 if a full rescan is needed,
 * in which case the INCONSISTENT flag is set.
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   u64 src, u64 dst, int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
		if (ret < 0)
			goto out;
		ret = 0;
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

/*
 * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
 * callers and transferred here (either used or freed on error).
 */
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
			      struct btrfs_qgroup_list *prealloc)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	ASSERT(prealloc);

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = __add_relation_rb(prealloc, member, parent);
	prealloc = NULL;
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	kfree(prealloc);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	bool found = false;
	int ret = 0;
	int ret2;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, then only try to delete
	 * the dead relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* Check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup *prealloc = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	prealloc = NULL;

	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	kfree(prealloc);
	return ret;
}

/*
 * Return 0 if we cannot delete the qgroup (it is not empty, has children, etc).
 * Return >0 if we can delete the qgroup.
 * Return <0 for other errors during the tree search.
 */
can_delete_qgroup(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup)1751 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1752 {
1753 	struct btrfs_key key;
1754 	struct btrfs_path *path;
1755 	int ret;
1756 
1757 	/*
1758 	 * Squota would never be inconsistent, but there can still be cases
1759 	 * where a dropped subvolume still has qgroup numbers, and squota
1760 	 * relies on such qgroups for future accounting.
1761 	 *
1762 	 * So for squota, do not allow dropping any non-zero qgroup.
1763 	 */
1764 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1765 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1766 		return 0;
1767 
1768 	/* For higher level qgroup, we can only delete it if it has no child. */
1769 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1770 		if (!list_empty(&qgroup->members))
1771 			return 0;
1772 		return 1;
1773 	}
1774 
1775 	/*
1776 	 * For a level-0 qgroup, we can only delete it if there is no
1777 	 * subvolume for it.
1778 	 * This means that even if a subvolume is unlinked but not yet fully
1779 	 * dropped, we cannot delete the qgroup.
1780 	 */
1781 	key.objectid = qgroup->qgroupid;
1782 	key.type = BTRFS_ROOT_ITEM_KEY;
1783 	key.offset = -1ULL;
1784 	path = btrfs_alloc_path();
1785 	if (!path)
1786 		return -ENOMEM;
1787 
1788 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1789 	btrfs_free_path(path);
1790 	/*
1791 	 * The @ret from btrfs_find_root() exactly matches our definition for
1792 	 * the return value, thus can be returned directly.
1793 	 */
1794 	return ret;
1795 }
1796 
1797 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1798 {
1799 	struct btrfs_fs_info *fs_info = trans->fs_info;
1800 	struct btrfs_qgroup *qgroup;
1801 	struct btrfs_qgroup_list *list;
1802 	int ret = 0;
1803 
1804 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1805 	if (!fs_info->quota_root) {
1806 		ret = -ENOTCONN;
1807 		goto out;
1808 	}
1809 
1810 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1811 	if (!qgroup) {
1812 		ret = -ENOENT;
1813 		goto out;
1814 	}
1815 
1816 	ret = can_delete_qgroup(fs_info, qgroup);
1817 	if (ret < 0)
1818 		goto out;
1819 	if (ret == 0) {
1820 		ret = -EBUSY;
1821 		goto out;
1822 	}
1823 
1824 	/* Check if there are no children of this qgroup */
1825 	if (!list_empty(&qgroup->members)) {
1826 		ret = -EBUSY;
1827 		goto out;
1828 	}
1829 
1830 	ret = del_qgroup_item(trans, qgroupid);
1831 	if (ret && ret != -ENOENT)
1832 		goto out;
1833 
1834 	while (!list_empty(&qgroup->groups)) {
1835 		list = list_first_entry(&qgroup->groups,
1836 					struct btrfs_qgroup_list, next_group);
1837 		ret = __del_qgroup_relation(trans, qgroupid,
1838 					    list->group->qgroupid);
1839 		if (ret)
1840 			goto out;
1841 	}
1842 
1843 	spin_lock(&fs_info->qgroup_lock);
1844 	/*
1845 	 * Warn on reserved space. The subvolume should have no child nor
1846 	 * corresponding subvolume.
1847 	 * Thus its reserved space should all be zero, no matter whether the
1848 	 * qgroup is consistent or which mode it is in.
1849 	 */
1850 	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1851 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1852 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1853 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1854 		btrfs_warn_rl(fs_info,
1855 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1856 			      btrfs_qgroup_level(qgroup->qgroupid),
1857 			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1858 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1859 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1860 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1861 
1862 	}
1863 	/*
1864 	 * The same for rfer/excl numbers, but that's only if our qgroup is
1865 	 * consistent and if it's in regular qgroup mode.
1866 	 * For simple mode it's not as accurate, so we can hit non-zero values
1867 	 * very frequently.
1868 	 */
1869 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1870 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1871 		if (qgroup->rfer || qgroup->excl ||
1872 		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1873 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1874 			btrfs_warn_rl(fs_info,
1875 "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1876 				      btrfs_qgroup_level(qgroup->qgroupid),
1877 				      btrfs_qgroup_subvolid(qgroup->qgroupid),
1878 				      qgroup->rfer, qgroup->rfer_cmpr,
1879 				      qgroup->excl, qgroup->excl_cmpr);
1880 			qgroup_mark_inconsistent(fs_info);
1881 		}
1882 	}
1883 	del_qgroup_rb(fs_info, qgroupid);
1884 	spin_unlock(&fs_info->qgroup_lock);
1885 
1886 	/*
1887 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1888 	 * spinlock, since the sysfs_remove_group() function needs to take
1889 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1890 	 */
1891 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1892 	kfree(qgroup);
1893 out:
1894 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1895 	return ret;
1896 }
1897 
1898 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1899 {
1900 	struct btrfs_trans_handle *trans;
1901 	int ret;
1902 
1903 	if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
1904 		return 0;
1905 
1906 	/*
1907 	 * Commit current transaction to make sure all the rfer/excl numbers
1908 	 * get updated.
1909 	 */
1910 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1911 	if (ret < 0)
1912 		return ret;
1913 
1914 	/* Start new trans to delete the qgroup info and limit items. */
1915 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1916 	if (IS_ERR(trans))
1917 		return PTR_ERR(trans);
1918 	ret = btrfs_remove_qgroup(trans, subvolid);
1919 	btrfs_end_transaction(trans);
1920 	/*
1921 	 * Either it's squota and the subvolume still has numbers needed for
1922 	 * future accounting, in which case we cannot delete it and just skip
1923 	 * it, or the qgroup was already removed by a qgroup rescan.
1924 	 *
1925 	 * In both cases it's safe to ignore the error.
1926 	 */
1927 	if (ret == -EBUSY || ret == -ENOENT)
1928 		ret = 0;
1929 	return ret;
1930 }
1931 
1932 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1933 		       struct btrfs_qgroup_limit *limit)
1934 {
1935 	struct btrfs_fs_info *fs_info = trans->fs_info;
1936 	struct btrfs_qgroup *qgroup;
1937 	int ret = 0;
1938 	/*
1939 	 * Sometimes we want to clear the limit on this qgroup.  We treat -1
1940 	 * as a special value which tells the kernel to clear the limit.
1941 	 */
1942 	const u64 CLEAR_VALUE = -1;
1943 
1944 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1945 	if (!fs_info->quota_root) {
1946 		ret = -ENOTCONN;
1947 		goto out;
1948 	}
1949 
1950 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1951 	if (!qgroup) {
1952 		ret = -ENOENT;
1953 		goto out;
1954 	}
1955 
1956 	spin_lock(&fs_info->qgroup_lock);
1957 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1958 		if (limit->max_rfer == CLEAR_VALUE) {
1959 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1960 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1961 			qgroup->max_rfer = 0;
1962 		} else {
1963 			qgroup->max_rfer = limit->max_rfer;
1964 		}
1965 	}
1966 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1967 		if (limit->max_excl == CLEAR_VALUE) {
1968 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1969 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1970 			qgroup->max_excl = 0;
1971 		} else {
1972 			qgroup->max_excl = limit->max_excl;
1973 		}
1974 	}
1975 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1976 		if (limit->rsv_rfer == CLEAR_VALUE) {
1977 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1978 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1979 			qgroup->rsv_rfer = 0;
1980 		} else {
1981 			qgroup->rsv_rfer = limit->rsv_rfer;
1982 		}
1983 	}
1984 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1985 		if (limit->rsv_excl == CLEAR_VALUE) {
1986 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1987 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1988 			qgroup->rsv_excl = 0;
1989 		} else {
1990 			qgroup->rsv_excl = limit->rsv_excl;
1991 		}
1992 	}
1993 	qgroup->lim_flags |= limit->flags;
1994 
1995 	spin_unlock(&fs_info->qgroup_lock);
1996 
1997 	ret = update_qgroup_limit_item(trans, qgroup);
1998 	if (ret) {
1999 		qgroup_mark_inconsistent(fs_info);
2000 		btrfs_info(fs_info, "unable to update quota limit for %llu",
2001 		       qgroupid);
2002 	}
2003 
2004 out:
2005 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
2006 	return ret;
2007 }
2008 
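/*
 * A usage sketch (assumed caller, compiled out): clearing a previously set
 * max_rfer limit through the special -1 value handled above.  Only the
 * flag/field shown is touched; "trans" is assumed to be a valid handle and
 * the function name is hypothetical.
 */
#if 0
static int example_clear_max_rfer(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_qgroup_limit lim = {
		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
		.max_rfer = (u64)-1,	/* CLEAR_VALUE: drop the limit */
	};

	return btrfs_limit_qgroup(trans, qgroupid, &lim);
}
#endif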
2009 /*
2010  * Inform qgroup to trace one dirty extent, whose info is recorded in
2011  * @record, so qgroup can account it at transaction commit time.
2012  *
2013  * No-lock version; the caller must hold the delayed ref lock and have
2014  * allocated the memory, then call btrfs_qgroup_trace_extent_post()
2015  * after exiting the lock context.
2016  * Return 0 for a successful insert.
2017  * Return >0 for an existing record; the caller can free @record safely.
2018  * Return <0 for an insertion failure; the caller can free @record safely.
2019  */
2020 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
2021 				struct btrfs_delayed_ref_root *delayed_refs,
2022 				struct btrfs_qgroup_extent_record *record)
2023 {
2024 	struct btrfs_qgroup_extent_record *existing, *ret;
2025 	const unsigned long index = (record->bytenr >> fs_info->sectorsize_bits);
2026 
2027 	if (!btrfs_qgroup_full_accounting(fs_info))
2028 		return 1;
2029 
2030 #if BITS_PER_LONG == 32
2031 	if (record->bytenr >= MAX_LFS_FILESIZE) {
2032 		btrfs_err_rl(fs_info,
2033 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
2034 			     record->bytenr);
2035 		btrfs_err_32bit_limit(fs_info);
2036 		return -EOVERFLOW;
2037 	}
2038 #endif
2039 
2040 	lockdep_assert_held(&delayed_refs->lock);
2041 	trace_btrfs_qgroup_trace_extent(fs_info, record);
2042 
2043 	xa_lock(&delayed_refs->dirty_extents);
2044 	existing = xa_load(&delayed_refs->dirty_extents, index);
2045 	if (existing) {
2046 		if (record->data_rsv && !existing->data_rsv) {
2047 			existing->data_rsv = record->data_rsv;
2048 			existing->data_rsv_refroot = record->data_rsv_refroot;
2049 		}
2050 		xa_unlock(&delayed_refs->dirty_extents);
2051 		return 1;
2052 	}
2053 
2054 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2055 	xa_unlock(&delayed_refs->dirty_extents);
2056 	if (xa_is_err(ret)) {
2057 		qgroup_mark_inconsistent(fs_info);
2058 		return xa_err(ret);
2059 	}
2060 
2061 	return 0;
2062 }
2063 
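/*
 * A quick numeric sketch of the xarray index used above (a 4K sector size is
 * assumed, i.e. sectorsize_bits == 12): a record for bytenr 1048576 lands at
 * index 1048576 >> 12 == 256.  Shifting by sectorsize_bits keeps the indices
 * dense, and on 32-bit hosts is why bytenr values at or beyond
 * MAX_LFS_FILESIZE must be rejected before the shift.
 */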
2064 /*
2065  * Post handler after qgroup_trace_extent_nolock().
2066  *
2067  * NOTE: Current qgroup does the expensive backref walk at transaction
2068  * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming
2069  * new transactions.
2070  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2071  * result.
2072  *
2073  * However for old_roots there is no need to do backref walk at that time,
2074  * since we search commit roots to walk backref and result will always be
2075  * correct.
2076  *
2077  * Due to the nature of the no-lock version, we can't do the backref walk there.
2078  * So we must call btrfs_qgroup_trace_extent_post() after exiting
2079  * spinlock context.
2080  *
2081  * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2082  * using current root, then we can move all expensive backref walk out of
2083  * transaction committing, but not now as qgroup accounting will be wrong again.
2084  */
2085 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2086 				   struct btrfs_qgroup_extent_record *qrecord)
2087 {
2088 	struct btrfs_backref_walk_ctx ctx = { 0 };
2089 	int ret;
2090 
2091 	if (!btrfs_qgroup_full_accounting(trans->fs_info))
2092 		return 0;
2093 	/*
2094 	 * We are always called in a context where we are already holding a
2095 	 * transaction handle. Often we are called when adding a data delayed
2096 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2097 	 * in which case we will be holding a write lock on extent buffer from a
2098 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2099 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2100 	 * that must be acquired before locking any extent buffers.
2101 	 *
2102 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2103 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2104 	 * it would not use commit roots and would lock extent buffers, causing
2105 	 * a deadlock if it ends up trying to read lock the same extent buffer
2106 	 * that was previously write locked at btrfs_truncate_inode_items().
2107 	 *
2108 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2109 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2110 	 * holding a transaction handle we don't need its protection.
2111 	 */
2112 	ASSERT(trans != NULL);
2113 
2114 	if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2115 		return 0;
2116 
2117 	ctx.bytenr = qrecord->bytenr;
2118 	ctx.fs_info = trans->fs_info;
2119 
2120 	ret = btrfs_find_all_roots(&ctx, true);
2121 	if (ret < 0) {
2122 		qgroup_mark_inconsistent(trans->fs_info);
2123 		btrfs_warn(trans->fs_info,
2124 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
2125 			ret);
2126 		return 0;
2127 	}
2128 
2129 	/*
2130 	 * Here we don't need to get the lock of
2131 	 * trans->transaction->delayed_refs, since the inserted qrecord won't
2132 	 * be deleted; only qrecord->node may be modified (new qrecord insert).
2133 	 *
2134 	 * So modifying qrecord->old_roots is safe here.
2135 	 */
2136 	qrecord->old_roots = ctx.roots;
2137 	return 0;
2138 }
2139 
2140 /*
2141  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2142  * @num_bytes.
2143  * So qgroup can account it at transaction commit time.
2144  *
2145  * Better encapsulated version, with memory allocation and backref walk for
2146  * commit roots.
2147  * So this can sleep.
2148  *
2149  * Return 0 if the operation is done.
2150  * Return <0 for errors, like memory allocation failure or an invalid
2151  * parameter (NULL trans).
2152  */
2153 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2154 			      u64 num_bytes)
2155 {
2156 	struct btrfs_fs_info *fs_info = trans->fs_info;
2157 	struct btrfs_qgroup_extent_record *record;
2158 	struct btrfs_delayed_ref_root *delayed_refs;
2159 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2160 	int ret;
2161 
2162 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2163 		return 0;
2164 	record = kzalloc(sizeof(*record), GFP_NOFS);
2165 	if (!record)
2166 		return -ENOMEM;
2167 
2168 	if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, index, GFP_NOFS)) {
2169 		kfree(record);
2170 		return -ENOMEM;
2171 	}
2172 
2173 	delayed_refs = &trans->transaction->delayed_refs;
2174 	record->bytenr = bytenr;
2175 	record->num_bytes = num_bytes;
2176 	record->old_roots = NULL;
2177 
2178 	spin_lock(&delayed_refs->lock);
2179 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
2180 	spin_unlock(&delayed_refs->lock);
2181 	if (ret) {
2182 		/* Clean up if insertion fails or item exists. */
2183 		xa_release(&delayed_refs->dirty_extents, index);
2184 		kfree(record);
2185 		return 0;
2186 	}
2187 	return btrfs_qgroup_trace_extent_post(trans, record);
2188 }
2189 
2190 /*
2191  * Inform qgroup to trace all leaf items of data
2192  *
2193  * Return 0 for success
2194  * Return <0 for error (ENOMEM)
2195  */
2196 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2197 				  struct extent_buffer *eb)
2198 {
2199 	struct btrfs_fs_info *fs_info = trans->fs_info;
2200 	int nr = btrfs_header_nritems(eb);
2201 	int i, extent_type, ret;
2202 	struct btrfs_key key;
2203 	struct btrfs_file_extent_item *fi;
2204 	u64 bytenr, num_bytes;
2205 
2206 	/* We can be called directly from walk_up_proc() */
2207 	if (!btrfs_qgroup_full_accounting(fs_info))
2208 		return 0;
2209 
2210 	for (i = 0; i < nr; i++) {
2211 		btrfs_item_key_to_cpu(eb, &key, i);
2212 
2213 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2214 			continue;
2215 
2216 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2217 		/* Filter out non qgroup-accountable extents */
2218 		extent_type = btrfs_file_extent_type(eb, fi);
2219 
2220 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2221 			continue;
2222 
2223 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2224 		if (!bytenr)
2225 			continue;
2226 
2227 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2228 
2229 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2230 		if (ret)
2231 			return ret;
2232 	}
2233 	cond_resched();
2234 	return 0;
2235 }
2236 
2237 /*
2238  * Walk up the tree from the bottom, freeing leaves and any interior
2239  * nodes which have had all slots visited. If a node (leaf or
2240  * interior) is freed, the node above it will have its slot
2241  * incremented. The root node will never be freed.
2242  *
2243  * At the end of this function, we should have a path which has all
2244  * slots incremented to the next position for a search. If we need to
2245  * read a new node it will be NULL and the node above it will have the
2246  * correct slot selected for a later read.
2247  *
2248  * If we increment the root node's slot counter past the number of
2249  * elements, 1 is returned to signal completion of the search.
2250  */
2251 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2252 {
2253 	int level = 0;
2254 	int nr, slot;
2255 	struct extent_buffer *eb;
2256 
2257 	if (root_level == 0)
2258 		return 1;
2259 
2260 	while (level <= root_level) {
2261 		eb = path->nodes[level];
2262 		nr = btrfs_header_nritems(eb);
2263 		path->slots[level]++;
2264 		slot = path->slots[level];
2265 		if (slot >= nr || level == 0) {
2266 			/*
2267 			 * Don't free the root - we will detect this
2268 			 * condition after our loop and return a
2269 			 * positive value for caller to stop walking the tree.
2270 			 */
2271 			if (level != root_level) {
2272 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2273 				path->locks[level] = 0;
2274 
2275 				free_extent_buffer(eb);
2276 				path->nodes[level] = NULL;
2277 				path->slots[level] = 0;
2278 			}
2279 		} else {
2280 			/*
2281 			 * We have a valid slot to walk back down
2282 			 * from. Stop here so caller can process these
2283 			 * new nodes.
2284 			 */
2285 			break;
2286 		}
2287 
2288 		level++;
2289 	}
2290 
2291 	eb = path->nodes[root_level];
2292 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2293 		return 1;
2294 
2295 	return 0;
2296 }
2297 
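/*
 * A worked example of the walk above (values assumed): with root_level == 2
 * and slots [L0 = last item, L1 = 3 of 8, L2 = 1 of 4], the exhausted leaf
 * at L0 is unlocked and freed, L1's slot is bumped to 4, and the loop stops
 * there since 4 < 8.  The caller then reads the new child at L1 slot 4 and
 * walks back down.  Only when every level, including the root, runs past
 * its item count does the function return 1 to end the search.
 */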
2298 /*
2299  * Helper function to trace a subtree tree block swap.
2300  *
2301  * The swap will happen in the highest tree block, but there may be a lot of
2302  * tree blocks involved.
2303  *
2304  * For example:
2305  *  OO = Old tree blocks
2306  *  NN = New tree blocks allocated during balance
2307  *
2308  *           File tree (257)                  Reloc tree for 257
2309  * L2              OO                                NN
2310  *               /    \                            /    \
2311  * L1          OO      OO (a)                    OO      NN (a)
2312  *            / \     / \                       / \     / \
2313  * L0       OO   OO OO   OO                   OO   OO NN   NN
2314  *                  (b)  (c)                          (b)  (c)
2315  *
2316  * When calling qgroup_trace_extent_swap(), we will pass:
2317  * @src_eb = OO(a)
2318  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2319  * @dst_level = 0
2320  * @root_level = 1
2321  *
2322  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2323  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2324  *
2325  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2326  *
2327  * 1) Tree search from @src_eb
2328  *    It acts as a simplified btrfs_search_slot().
2329  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2330  *    (first key).
2331  *
2332  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2333  *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
2334  *    They should be marked during the previous (@dst_level = 1) iteration.
2335  *
2336  * 3) Mark file extents in leaves dirty
2337  *    We don't have a good way to pick out new file extents only.
2338  *    So we still follow the old method of scanning all file extents in
2339  *    the leaf.
2340  *
2341  * This function can free us from keeping two paths, thus later we only need
2342  * to care about how to iterate all new tree blocks in reloc tree.
2343  */
2344 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
2345 				    struct extent_buffer *src_eb,
2346 				    struct btrfs_path *dst_path,
2347 				    int dst_level, int root_level,
2348 				    bool trace_leaf)
2349 {
2350 	struct btrfs_key key;
2351 	struct btrfs_path *src_path;
2352 	struct btrfs_fs_info *fs_info = trans->fs_info;
2353 	u32 nodesize = fs_info->nodesize;
2354 	int cur_level = root_level;
2355 	int ret;
2356 
2357 	BUG_ON(dst_level > root_level);
2358 	/* Level mismatch */
2359 	if (btrfs_header_level(src_eb) != root_level)
2360 		return -EINVAL;
2361 
2362 	src_path = btrfs_alloc_path();
2363 	if (!src_path) {
2364 		ret = -ENOMEM;
2365 		goto out;
2366 	}
2367 
2368 	if (dst_level)
2369 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2370 	else
2371 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2372 
2373 	/* For src_path */
2374 	atomic_inc(&src_eb->refs);
2375 	src_path->nodes[root_level] = src_eb;
2376 	src_path->slots[root_level] = dst_path->slots[root_level];
2377 	src_path->locks[root_level] = 0;
2378 
2379 	/* A simplified version of btrfs_search_slot() */
2380 	while (cur_level >= dst_level) {
2381 		struct btrfs_key src_key;
2382 		struct btrfs_key dst_key;
2383 
2384 		if (src_path->nodes[cur_level] == NULL) {
2385 			struct extent_buffer *eb;
2386 			int parent_slot;
2387 
2388 			eb = src_path->nodes[cur_level + 1];
2389 			parent_slot = src_path->slots[cur_level + 1];
2390 
2391 			eb = btrfs_read_node_slot(eb, parent_slot);
2392 			if (IS_ERR(eb)) {
2393 				ret = PTR_ERR(eb);
2394 				goto out;
2395 			}
2396 
2397 			src_path->nodes[cur_level] = eb;
2398 
2399 			btrfs_tree_read_lock(eb);
2400 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2401 		}
2402 
2403 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2404 		if (cur_level) {
2405 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2406 					&dst_key, dst_path->slots[cur_level]);
2407 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2408 					&src_key, src_path->slots[cur_level]);
2409 		} else {
2410 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2411 					&dst_key, dst_path->slots[cur_level]);
2412 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2413 					&src_key, src_path->slots[cur_level]);
2414 		}
2415 		/* Content mismatch, something went wrong */
2416 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2417 			ret = -ENOENT;
2418 			goto out;
2419 		}
2420 		cur_level--;
2421 	}
2422 
2423 	/*
2424 	 * Now both @dst_path and @src_path have been populated, record the tree
2425 	 * blocks for qgroup accounting.
2426 	 */
2427 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2428 					nodesize);
2429 	if (ret < 0)
2430 		goto out;
2431 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2432 					nodesize);
2433 	if (ret < 0)
2434 		goto out;
2435 
2436 	/* Record leaf file extents */
2437 	if (dst_level == 0 && trace_leaf) {
2438 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2439 		if (ret < 0)
2440 			goto out;
2441 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2442 	}
2443 out:
2444 	btrfs_free_path(src_path);
2445 	return ret;
2446 }
2447 
2448 /*
2449  * Helper function to do recursive generation-aware depth-first search, to
2450  * locate all new tree blocks in a subtree of reloc tree.
2451  *
2452  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2453  *         reloc tree
2454  * L2         NN (a)
2455  *          /    \
2456  * L1    OO        NN (b)
2457  *      /  \      /  \
2458  * L0  OO  OO    OO  NN
2459  *               (c) (d)
2460  * If we pass:
2461  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2462  * @cur_level = 1
2463  * @root_level = 1
2464  *
2465  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2466  * trace the above tree blocks along with their counterparts in the file
2467  * tree.  During the search, old tree blocks like OO(c) will be skipped as
2468  * the tree block swap won't affect them.
2469  */
2470 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2471 					   struct extent_buffer *src_eb,
2472 					   struct btrfs_path *dst_path,
2473 					   int cur_level, int root_level,
2474 					   u64 last_snapshot, bool trace_leaf)
2475 {
2476 	struct btrfs_fs_info *fs_info = trans->fs_info;
2477 	struct extent_buffer *eb;
2478 	bool need_cleanup = false;
2479 	int ret = 0;
2480 	int i;
2481 
2482 	/* Level sanity check */
2483 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2484 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2485 	    root_level < cur_level) {
2486 		btrfs_err_rl(fs_info,
2487 			"%s: bad levels, cur_level=%d root_level=%d",
2488 			__func__, cur_level, root_level);
2489 		return -EUCLEAN;
2490 	}
2491 
2492 	/* Read the tree block if needed */
2493 	if (dst_path->nodes[cur_level] == NULL) {
2494 		int parent_slot;
2495 		u64 child_gen;
2496 
2497 		/*
2498 		 * dst_path->nodes[root_level] must be initialized before
2499 		 * calling this function.
2500 		 */
2501 		if (cur_level == root_level) {
2502 			btrfs_err_rl(fs_info,
2503 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2504 				__func__, root_level, root_level, cur_level);
2505 			return -EUCLEAN;
2506 		}
2507 
2508 		/*
2509 		 * We need to get child blockptr/gen from parent before we can
2510 		 * read it.
2511 		 */
2512 		eb = dst_path->nodes[cur_level + 1];
2513 		parent_slot = dst_path->slots[cur_level + 1];
2514 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2515 
2516 		/* This node is old, no need to trace */
2517 		if (child_gen < last_snapshot)
2518 			goto out;
2519 
2520 		eb = btrfs_read_node_slot(eb, parent_slot);
2521 		if (IS_ERR(eb)) {
2522 			ret = PTR_ERR(eb);
2523 			goto out;
2524 		}
2525 
2526 		dst_path->nodes[cur_level] = eb;
2527 		dst_path->slots[cur_level] = 0;
2528 
2529 		btrfs_tree_read_lock(eb);
2530 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2531 		need_cleanup = true;
2532 	}
2533 
2534 	/* Now record this tree block and its counterpart for qgroups */
2535 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2536 				       root_level, trace_leaf);
2537 	if (ret < 0)
2538 		goto cleanup;
2539 
2540 	eb = dst_path->nodes[cur_level];
2541 
2542 	if (cur_level > 0) {
2543 		/* Iterate all child tree blocks */
2544 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2545 			/* Skip old tree blocks as they won't be swapped */
2546 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2547 				continue;
2548 			dst_path->slots[cur_level] = i;
2549 
2550 			/* Recursive call (at most 7 times) */
2551 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2552 					dst_path, cur_level - 1, root_level,
2553 					last_snapshot, trace_leaf);
2554 			if (ret < 0)
2555 				goto cleanup;
2556 		}
2557 	}
2558 
2559 cleanup:
2560 	if (need_cleanup) {
2561 		/* Clean up */
2562 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2563 				     dst_path->locks[cur_level]);
2564 		free_extent_buffer(dst_path->nodes[cur_level]);
2565 		dst_path->nodes[cur_level] = NULL;
2566 		dst_path->slots[cur_level] = 0;
2567 		dst_path->locks[cur_level] = 0;
2568 	}
2569 out:
2570 	return ret;
2571 }
2572 
2573 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2574 				struct extent_buffer *src_eb,
2575 				struct extent_buffer *dst_eb,
2576 				u64 last_snapshot, bool trace_leaf)
2577 {
2578 	struct btrfs_fs_info *fs_info = trans->fs_info;
2579 	struct btrfs_path *dst_path = NULL;
2580 	int level;
2581 	int ret;
2582 
2583 	if (!btrfs_qgroup_full_accounting(fs_info))
2584 		return 0;
2585 
2586 	/* Wrong parameter order */
2587 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2588 		btrfs_err_rl(fs_info,
2589 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2590 			     btrfs_header_generation(src_eb),
2591 			     btrfs_header_generation(dst_eb));
2592 		return -EUCLEAN;
2593 	}
2594 
2595 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2596 		ret = -EIO;
2597 		goto out;
2598 	}
2599 
2600 	level = btrfs_header_level(dst_eb);
2601 	dst_path = btrfs_alloc_path();
2602 	if (!dst_path) {
2603 		ret = -ENOMEM;
2604 		goto out;
2605 	}
2606 	/* For dst_path */
2607 	atomic_inc(&dst_eb->refs);
2608 	dst_path->nodes[level] = dst_eb;
2609 	dst_path->slots[level] = 0;
2610 	dst_path->locks[level] = 0;
2611 
2612 	/* Do the generation-aware depth-first search */
2613 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2614 					      level, last_snapshot, trace_leaf);
2615 	if (ret < 0)
2616 		goto out;
2617 	ret = 0;
2618 
2619 out:
2620 	btrfs_free_path(dst_path);
2621 	if (ret < 0)
2622 		qgroup_mark_inconsistent(fs_info);
2623 	return ret;
2624 }
2625 
2626 /*
2627  * Inform qgroup to trace a whole subtree, including all its child tree
2628  * blocks and data.
2629  * The root tree block is specified by @root_eb.
2630  *
2631  * Normally used by relocation (tree block swap) and subvolume deletion.
2632  *
2633  * Return 0 for success
2634  * Return <0 for error (ENOMEM or tree search error)
2635  */
2636 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2637 			       struct extent_buffer *root_eb,
2638 			       u64 root_gen, int root_level)
2639 {
2640 	struct btrfs_fs_info *fs_info = trans->fs_info;
2641 	int ret = 0;
2642 	int level;
2643 	u8 drop_subptree_thres;
2644 	struct extent_buffer *eb = root_eb;
2645 	struct btrfs_path *path = NULL;
2646 
2647 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2648 	ASSERT(root_eb != NULL);
2649 
2650 	if (!btrfs_qgroup_full_accounting(fs_info))
2651 		return 0;
2652 
2653 	spin_lock(&fs_info->qgroup_lock);
2654 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2655 	spin_unlock(&fs_info->qgroup_lock);
2656 
2657 	/*
2658 	 * This function only gets called for snapshot drop.  If we hit a high
2659 	 * level node here, it means we are going to change ownership for quite
2660 	 * a lot of extents, which will greatly slow down btrfs_commit_transaction().
2661 	 *
2662 	 * So if we find a high level tree here, we just skip the accounting
2663 	 * and mark qgroup inconsistent.
2664 	 */
2665 	if (root_level >= drop_subptree_thres) {
2666 		qgroup_mark_inconsistent(fs_info);
2667 		return 0;
2668 	}
2669 
2670 	if (!extent_buffer_uptodate(root_eb)) {
2671 		struct btrfs_tree_parent_check check = {
2672 			.has_first_key = false,
2673 			.transid = root_gen,
2674 			.level = root_level
2675 		};
2676 
2677 		ret = btrfs_read_extent_buffer(root_eb, &check);
2678 		if (ret)
2679 			goto out;
2680 	}
2681 
2682 	if (root_level == 0) {
2683 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2684 		goto out;
2685 	}
2686 
2687 	path = btrfs_alloc_path();
2688 	if (!path)
2689 		return -ENOMEM;
2690 
2691 	/*
2692 	 * Walk down the tree.  Missing extent blocks are filled in as
2693 	 * we go. Metadata is accounted every time we read a new
2694 	 * extent block.
2695 	 *
2696 	 * When we reach a leaf, we account for file extent items in it,
2697 	 * walk back up the tree (adjusting slot pointers as we go)
2698 	 * and restart the search process.
2699 	 */
2700 	atomic_inc(&root_eb->refs);	/* For path */
2701 	path->nodes[root_level] = root_eb;
2702 	path->slots[root_level] = 0;
2703 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2704 walk_down:
2705 	level = root_level;
2706 	while (level >= 0) {
2707 		if (path->nodes[level] == NULL) {
2708 			int parent_slot;
2709 			u64 child_bytenr;
2710 
2711 			/*
2712 			 * We need to get child blockptr from parent before we
2713 			 * can read it.
2714 			 */
2715 			eb = path->nodes[level + 1];
2716 			parent_slot = path->slots[level + 1];
2717 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2718 
2719 			eb = btrfs_read_node_slot(eb, parent_slot);
2720 			if (IS_ERR(eb)) {
2721 				ret = PTR_ERR(eb);
2722 				goto out;
2723 			}
2724 
2725 			path->nodes[level] = eb;
2726 			path->slots[level] = 0;
2727 
2728 			btrfs_tree_read_lock(eb);
2729 			path->locks[level] = BTRFS_READ_LOCK;
2730 
2731 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2732 							fs_info->nodesize);
2733 			if (ret)
2734 				goto out;
2735 		}
2736 
2737 		if (level == 0) {
2738 			ret = btrfs_qgroup_trace_leaf_items(trans,
2739 							    path->nodes[level]);
2740 			if (ret)
2741 				goto out;
2742 
2743 			/* Nonzero return here means we completed our search */
2744 			ret = adjust_slots_upwards(path, root_level);
2745 			if (ret)
2746 				break;
2747 
2748 			/* Restart search with new slots */
2749 			goto walk_down;
2750 		}
2751 
2752 		level--;
2753 	}
2754 
2755 	ret = 0;
2756 out:
2757 	btrfs_free_path(path);
2758 
2759 	return ret;
2760 }
2761 
2762 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2763 {
2764 	if (!list_empty(&qgroup->nested_iterator))
2765 		return;
2766 
2767 	list_add_tail(&qgroup->nested_iterator, head);
2768 }
2769 
2770 static void qgroup_iterator_nested_clean(struct list_head *head)
2771 {
2772 	while (!list_empty(head)) {
2773 		struct btrfs_qgroup *qgroup;
2774 
2775 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2776 		list_del_init(&qgroup->nested_iterator);
2777 	}
2778 }
2779 
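/*
 * A short usage sketch (assumed, mirroring qgroup_update_refcnt() below):
 * since qgroup->nested_iterator is only linked while the qgroup sits on the
 * temporary list, qgroup_iterator_nested_add() is idempotent and a qgroup
 * reached through two parent chains is visited only once:
 *
 *	LIST_HEAD(qgroups);
 *	qgroup_iterator_nested_add(&qgroups, qg);	(links qg)
 *	qgroup_iterator_nested_add(&qgroups, qg);	(no-op, already linked)
 *	qgroup_iterator_nested_clean(&qgroups);		(unlinks every entry)
 */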
2780 #define UPDATE_NEW	0
2781 #define UPDATE_OLD	1
2782 /*
2783  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2784  */
2785 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2786 				 struct ulist *roots, struct list_head *qgroups,
2787 				 u64 seq, int update_old)
2788 {
2789 	struct ulist_node *unode;
2790 	struct ulist_iterator uiter;
2791 	struct btrfs_qgroup *qg;
2792 
2793 	if (!roots)
2794 		return;
2795 	ULIST_ITER_INIT(&uiter);
2796 	while ((unode = ulist_next(roots, &uiter))) {
2797 		LIST_HEAD(tmp);
2798 
2799 		qg = find_qgroup_rb(fs_info, unode->val);
2800 		if (!qg)
2801 			continue;
2802 
2803 		qgroup_iterator_nested_add(qgroups, qg);
2804 		qgroup_iterator_add(&tmp, qg);
2805 		list_for_each_entry(qg, &tmp, iterator) {
2806 			struct btrfs_qgroup_list *glist;
2807 
2808 			if (update_old)
2809 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2810 			else
2811 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2812 
2813 			list_for_each_entry(glist, &qg->groups, next_group) {
2814 				qgroup_iterator_nested_add(qgroups, glist->group);
2815 				qgroup_iterator_add(&tmp, glist->group);
2816 			}
2817 		}
2818 		qgroup_iterator_clean(&tmp);
2819 	}
2820 }
2821 
2822 /*
2823  * Update qgroup rfer/excl counters.
2824  * Rfer update is easy, the code explains itself.
2825  *
2826  * Excl update is tricky, the update is split into 2 parts.
2827  * Part 1: Possible exclusive <-> sharing detect:
2828  *	|	A	|	!A	|
2829  *  -------------------------------------
2830  *  B	|	*	|	-	|
2831  *  -------------------------------------
2832  *  !B	|	+	|	**	|
2833  *  -------------------------------------
2834  *
2835  * Conditions:
2836  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2837  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2838  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2839  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2840  *
2841  * Results:
2842  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2843  * *: Definitely not changed.		**: Possible unchanged.
2844  *
2845  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2846  *
2847  * To make the logic clear, we first use conditions A and B to split the
2848  * combination into 4 results.
2849  *
2850  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2851  * them only one variant may be 0.
2852  *
2853  * Lastly, check result **, since there are 2 variants that may be 0, split
2854  * them again (2x2).
2855  * But this time we don't need to consider other things; the code and logic
2856  * are easy to understand now.
2857  */
2858 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2859 				   struct list_head *qgroups, u64 nr_old_roots,
2860 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2861 {
2862 	struct btrfs_qgroup *qg;
2863 
2864 	list_for_each_entry(qg, qgroups, nested_iterator) {
2865 		u64 cur_new_count, cur_old_count;
2866 		bool dirty = false;
2867 
2868 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2869 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2870 
2871 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2872 					     cur_new_count);
2873 
2874 		/* Rfer update part */
2875 		if (cur_old_count == 0 && cur_new_count > 0) {
2876 			qg->rfer += num_bytes;
2877 			qg->rfer_cmpr += num_bytes;
2878 			dirty = true;
2879 		}
2880 		if (cur_old_count > 0 && cur_new_count == 0) {
2881 			qg->rfer -= num_bytes;
2882 			qg->rfer_cmpr -= num_bytes;
2883 			dirty = true;
2884 		}
2885 
2886 		/* Excl update part */
2887 		/* Exclusive/none -> shared case */
2888 		if (cur_old_count == nr_old_roots &&
2889 		    cur_new_count < nr_new_roots) {
2890 			/* Exclusive -> shared */
2891 			if (cur_old_count != 0) {
2892 				qg->excl -= num_bytes;
2893 				qg->excl_cmpr -= num_bytes;
2894 				dirty = true;
2895 			}
2896 		}
2897 
2898 		/* Shared -> exclusive/none case */
2899 		if (cur_old_count < nr_old_roots &&
2900 		    cur_new_count == nr_new_roots) {
2901 			/* Shared->exclusive */
2902 			if (cur_new_count != 0) {
2903 				qg->excl += num_bytes;
2904 				qg->excl_cmpr += num_bytes;
2905 				dirty = true;
2906 			}
2907 		}
2908 
2909 		/* Exclusive/none -> exclusive/none case */
2910 		if (cur_old_count == nr_old_roots &&
2911 		    cur_new_count == nr_new_roots) {
2912 			if (cur_old_count == 0) {
2913 				/* None -> exclusive/none */
2914 
2915 				if (cur_new_count != 0) {
2916 					/* None -> exclusive */
2917 					qg->excl += num_bytes;
2918 					qg->excl_cmpr += num_bytes;
2919 					dirty = true;
2920 				}
2921 				/* None -> none, nothing changed */
2922 			} else {
2923 				/* Exclusive -> exclusive/none */
2924 
2925 				if (cur_new_count == 0) {
2926 					/* Exclusive -> none */
2927 					qg->excl -= num_bytes;
2928 					qg->excl_cmpr -= num_bytes;
2929 					dirty = true;
2930 				}
2931 				/* Exclusive -> exclusive, nothing changed */
2932 			}
2933 		}
2934 
2935 		if (dirty)
2936 			qgroup_dirty(fs_info, qg);
2937 	}
2938 }
2939 
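/*
 * A worked instance of the table documented above qgroup_update_counters()
 * (numbers assumed): let a 16K extent move from old roots {A, B} to new
 * roots {A}, so nr_old_roots == 2 and nr_new_roots == 1.
 *
 * For A: cur_old_count == 1 < nr_old_roots (condition A) while
 * cur_new_count == 1 == nr_new_roots (condition !B), the "+" case, so
 * A->excl grows by 16K and A->rfer is unchanged.
 *
 * For B: cur_old_count == 1 and cur_new_count == 0, so B->rfer shrinks by
 * 16K; B was not exclusive before (A) nor now (B), the "*" case, so its
 * excl is untouched.
 */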
2940 /*
2941  * Check if @roots is potentially a list of fs tree roots
2942  *
2943  * Return 0 for definitely not a fs/subvol tree roots ulist
2944  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2945  *          one as well)
2946  */
2947 static int maybe_fs_roots(struct ulist *roots)
2948 {
2949 	struct ulist_node *unode;
2950 	struct ulist_iterator uiter;
2951 
2952 	/* Empty one, still possible for fs roots */
2953 	if (!roots || roots->nnodes == 0)
2954 		return 1;
2955 
2956 	ULIST_ITER_INIT(&uiter);
2957 	unode = ulist_next(roots, &uiter);
2958 	if (!unode)
2959 		return 1;
2960 
2961 	/*
2962 	 * If it contains fs tree roots, then it must belong to fs/subvol
2963 	 * trees.
2964 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2965 	 */
2966 	return is_fstree(unode->val);
2967 }
2968 
2969 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2970 				u64 num_bytes, struct ulist *old_roots,
2971 				struct ulist *new_roots)
2972 {
2973 	struct btrfs_fs_info *fs_info = trans->fs_info;
2974 	LIST_HEAD(qgroups);
2975 	u64 seq;
2976 	u64 nr_new_roots = 0;
2977 	u64 nr_old_roots = 0;
2978 	int ret = 0;
2979 
2980 	/*
2981 	 * If quotas get disabled meanwhile, the resources need to be freed and
2982 	 * we can't just exit here.
2983 	 */
2984 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2985 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2986 		goto out_free;
2987 
2988 	if (new_roots) {
2989 		if (!maybe_fs_roots(new_roots))
2990 			goto out_free;
2991 		nr_new_roots = new_roots->nnodes;
2992 	}
2993 	if (old_roots) {
2994 		if (!maybe_fs_roots(old_roots))
2995 			goto out_free;
2996 		nr_old_roots = old_roots->nnodes;
2997 	}
2998 
2999 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
3000 	if (nr_old_roots == 0 && nr_new_roots == 0)
3001 		goto out_free;
3002 
3003 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
3004 					num_bytes, nr_old_roots, nr_new_roots);
3005 
3006 	mutex_lock(&fs_info->qgroup_rescan_lock);
3007 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3008 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
3009 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3010 			ret = 0;
3011 			goto out_free;
3012 		}
3013 	}
3014 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3015 
3016 	spin_lock(&fs_info->qgroup_lock);
3017 	seq = fs_info->qgroup_seq;
3018 
3019 	/* Update old refcnts using old_roots */
3020 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
3021 
3022 	/* Update new refcnts using new_roots */
3023 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
3024 
3025 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
3026 			       num_bytes, seq);
3027 
3028 	/*
3029 	 * We're done using the iterator, release all its qgroups while holding
3030 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
3031 	 * and trigger use-after-free accesses to qgroups.
3032 	 */
3033 	qgroup_iterator_nested_clean(&qgroups);
3034 
3035 	/*
3036 	 * Bump qgroup_seq to avoid seq overlap
3037 	 */
3038 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
3039 	spin_unlock(&fs_info->qgroup_lock);
3040 out_free:
3041 	ulist_free(old_roots);
3042 	ulist_free(new_roots);
3043 	return ret;
3044 }
3045 
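/*
 * A small numeric sketch of the seq bump above (values assumed): refcnts
 * are stored as "seq + count", so with qgroup_seq == 100, a qgroup reached
 * by 3 old roots and 5 new roots ends at old_refcnt 103 and new_refcnt 105.
 * Advancing qgroup_seq by max(3, 5) + 1 == 6 makes the next extent start
 * counting at 106, so its refcnts can never collide with this one's.
 */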
3046 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
3047 {
3048 	struct btrfs_fs_info *fs_info = trans->fs_info;
3049 	struct btrfs_qgroup_extent_record *record;
3050 	struct btrfs_delayed_ref_root *delayed_refs;
3051 	struct ulist *new_roots = NULL;
3052 	unsigned long index;
3053 	u64 num_dirty_extents = 0;
3054 	u64 qgroup_to_skip;
3055 	int ret = 0;
3056 
3057 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3058 		return 0;
3059 
3060 	delayed_refs = &trans->transaction->delayed_refs;
3061 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3062 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3063 		num_dirty_extents++;
3064 		trace_btrfs_qgroup_account_extents(fs_info, record);
3065 
3066 		if (!ret && !(fs_info->qgroup_flags &
3067 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3068 			struct btrfs_backref_walk_ctx ctx = { 0 };
3069 
3070 			ctx.bytenr = record->bytenr;
3071 			ctx.fs_info = fs_info;
3072 
3073 			/*
3074 			 * Old roots should be searched when inserting qgroup
3075 			 * extent record.
3076 			 *
3077 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
3078 			 * we may have some records inserted during
3079 			 * NO_ACCOUNTING (thus no old_roots populated), but
3080 			 * later we start rescan, which clears NO_ACCOUNTING,
3081 			 * leaving some inserted records without old_roots
3082 			 * populated.
3083 			 *
3084 			 * Those cases are rare and should not cause too much
3085 			 * time spent during commit_transaction().
3086 			 */
3087 			if (!record->old_roots) {
3088 				/* Search commit root to find old_roots */
3089 				ret = btrfs_find_all_roots(&ctx, false);
3090 				if (ret < 0)
3091 					goto cleanup;
3092 				record->old_roots = ctx.roots;
3093 				ctx.roots = NULL;
3094 			}
3095 
3096 			/*
3097 			 * Use BTRFS_SEQ_LAST as time_seq to do a special search
3098 			 * which doesn't lock the tree or delayed_refs and searches
3099 			 * the current root. It's safe inside commit_transaction().
3100 			 */
3101 			ctx.trans = trans;
3102 			ctx.time_seq = BTRFS_SEQ_LAST;
3103 			ret = btrfs_find_all_roots(&ctx, false);
3104 			if (ret < 0)
3105 				goto cleanup;
3106 			new_roots = ctx.roots;
3107 			if (qgroup_to_skip) {
3108 				ulist_del(new_roots, qgroup_to_skip, 0);
3109 				ulist_del(record->old_roots, qgroup_to_skip,
3110 					  0);
3111 			}
3112 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
3113 							  record->num_bytes,
3114 							  record->old_roots,
3115 							  new_roots);
3116 			record->old_roots = NULL;
3117 			new_roots = NULL;
3118 		}
3119 		/* Free the reserved data space */
3120 		btrfs_qgroup_free_refroot(fs_info,
3121 				record->data_rsv_refroot,
3122 				record->data_rsv,
3123 				BTRFS_QGROUP_RSV_DATA);
3124 cleanup:
3125 		ulist_free(record->old_roots);
3126 		ulist_free(new_roots);
3127 		new_roots = NULL;
3128 		xa_erase(&delayed_refs->dirty_extents, index);
3129 		kfree(record);
3130 
3131 	}
3132 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
3133 				       num_dirty_extents);
3134 	return ret;
3135 }
3136 
3137 /*
3138  * Writes all changed qgroups to disk.
3139  * Called by the transaction commit path and the qgroup assign ioctl.
3140  */
3141 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3142 {
3143 	struct btrfs_fs_info *fs_info = trans->fs_info;
3144 	int ret = 0;
3145 
3146 	/*
3147 	 * In case we are called from the qgroup assign ioctl, assert that we
3148 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3149 	 * disable operation (ioctl) and access a freed quota root.
3150 	 */
3151 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3152 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3153 
3154 	if (!fs_info->quota_root)
3155 		return ret;
3156 
3157 	spin_lock(&fs_info->qgroup_lock);
3158 	while (!list_empty(&fs_info->dirty_qgroups)) {
3159 		struct btrfs_qgroup *qgroup;
3160 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3161 					  struct btrfs_qgroup, dirty);
3162 		list_del_init(&qgroup->dirty);
3163 		spin_unlock(&fs_info->qgroup_lock);
3164 		ret = update_qgroup_info_item(trans, qgroup);
3165 		if (ret)
3166 			qgroup_mark_inconsistent(fs_info);
3167 		ret = update_qgroup_limit_item(trans, qgroup);
3168 		if (ret)
3169 			qgroup_mark_inconsistent(fs_info);
3170 		spin_lock(&fs_info->qgroup_lock);
3171 	}
3172 	if (btrfs_qgroup_enabled(fs_info))
3173 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3174 	else
3175 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3176 	spin_unlock(&fs_info->qgroup_lock);
3177 
3178 	ret = update_qgroup_status_item(trans);
3179 	if (ret)
3180 		qgroup_mark_inconsistent(fs_info);
3181 
3182 	return ret;
3183 }
3184 
3185 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3186 			       struct btrfs_qgroup_inherit *inherit,
3187 			       size_t size)
3188 {
3189 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3190 		return -EOPNOTSUPP;
3191 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3192 		return -EINVAL;
3193 
3194 	/*
3195 	 * In the past we allowed btrfs_qgroup_inherit to specify to copy
3196 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3197 	 * been disabled in userspace for a very long time, but here we should
3198 	 * also disable it in kernel, as this behavior is known to mark qgroup
3199  * also disable it in the kernel, as this behavior is known to mark qgroup
3200 	 *
3201 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3202 	 */
3203 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3204 		return -EINVAL;
3205 
3206 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3207 		return -EINVAL;
3208 
3209 	/*
3210 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3211 	 * Qgroup can still be enabled later, causing problems, but in that case
3212 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3213 	 */
3214 	if (!btrfs_qgroup_enabled(fs_info))
3215 		return 0;
3216 
3217 	/*
3218 	 * Now check all the remaining qgroups, they should all:
3219 	 *
3220 	 * - Exist
3221 	 * - Be higher level qgroups.
3222 	 */
3223 	for (int i = 0; i < inherit->num_qgroups; i++) {
3224 		struct btrfs_qgroup *qgroup;
3225 		u64 qgroupid = inherit->qgroups[i];
3226 
3227 		if (btrfs_qgroup_level(qgroupid) == 0)
3228 			return -EINVAL;
3229 
3230 		spin_lock(&fs_info->qgroup_lock);
3231 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3232 		if (!qgroup) {
3233 			spin_unlock(&fs_info->qgroup_lock);
3234 			return -ENOENT;
3235 		}
3236 		spin_unlock(&fs_info->qgroup_lock);
3237 	}
3238 	return 0;
3239 }
3240 
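/*
 * A minimal sketch of the size check above (values assumed): with
 * num_qgroups == 3, struct_size(inherit, qgroups, 3) equals
 * sizeof(struct btrfs_qgroup_inherit) + 3 * sizeof(u64), and an ioctl
 * buffer of any other size -- for instance one still carrying the legacy
 * num_ref_copies/num_excl_copies payload -- is rejected with -EINVAL.
 */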
3241 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3242 			       u64 inode_rootid,
3243 			       struct btrfs_qgroup_inherit **inherit)
3244 {
3245 	int i = 0;
3246 	u64 num_qgroups = 0;
3247 	struct btrfs_qgroup *inode_qg;
3248 	struct btrfs_qgroup_list *qg_list;
3249 	struct btrfs_qgroup_inherit *res;
3250 	size_t struct_sz;
3251 	u64 *qgids;
3252 
3253 	if (*inherit)
3254 		return -EEXIST;
3255 
3256 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3257 	if (!inode_qg)
3258 		return -ENOENT;
3259 
3260 	num_qgroups = list_count_nodes(&inode_qg->groups);
3261 
3262 	if (!num_qgroups)
3263 		return 0;
3264 
3265 	struct_sz = struct_size(res, qgroups, num_qgroups);
3266 	if (struct_sz == SIZE_MAX)
3267 		return -ERANGE;
3268 
3269 	res = kzalloc(struct_sz, GFP_NOFS);
3270 	if (!res)
3271 		return -ENOMEM;
3272 	res->num_qgroups = num_qgroups;
3273 	qgids = res->qgroups;
3274 
3275 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3276 		qgids[i++] = qg_list->group->qgroupid;
3277 
3278 	*inherit = res;
3279 	return 0;
3280 }
3281 
3282 /*
3283  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3284  * @parent, and that @parent owns all its bytes exclusively, we can skip
3285  * the full rescan, by just adding nodesize to the @parent's excl/rfer.
3286  *
3287  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3288  * Return 0 if a quick inherit is done.
3289  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3290  */
3291 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3292 					 u64 srcid, u64 parentid)
3293 {
3294 	struct btrfs_qgroup *src;
3295 	struct btrfs_qgroup *parent;
3296 	struct btrfs_qgroup_list *list;
3297 	int nr_parents = 0;
3298 
3299 	src = find_qgroup_rb(fs_info, srcid);
3300 	if (!src)
3301 		return -ENOENT;
3302 	parent = find_qgroup_rb(fs_info, parentid);
3303 	if (!parent)
3304 		return -ENOENT;
3305 
3306 	/*
3307 	 * Source has no parent qgroup, but our new qgroup would have one.
3308 	 * Qgroup numbers would become inconsistent.
3309 	 */
3310 	if (list_empty(&src->groups))
3311 		return 1;
3312 
3313 	list_for_each_entry(list, &src->groups, next_group) {
3314 		/* The parent is not the same, quick update is not possible. */
3315 		if (list->group->qgroupid != parentid)
3316 			return 1;
3317 		nr_parents++;
3318 		/*
3319 		 * More than one parent qgroup, we can't be sure about accounting
3320 		 * consistency.
3321 		 */
3322 		if (nr_parents > 1)
3323 			return 1;
3324 	}
3325 
3326 	/*
3327 	 * The parent is not exclusively owning all its bytes.  We're not sure
3328 	 * if the source has any bytes not fully owned by the parent.
3329 	 */
3330 	if (parent->excl != parent->rfer)
3331 		return 1;
3332 
3333 	parent->excl += fs_info->nodesize;
3334 	parent->rfer += fs_info->nodesize;
3335 	return 0;
3336 }
3337 
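/*
 * A worked example of the quick inherit above (numbers assumed): source
 * subvolume 0/257 has the single parent 1/100, and 1/100 has
 * rfer == excl == 1M, i.e. it exclusively owns everything it references.
 * Snapshotting 0/257 into another member of 1/100 then only adds the new
 * root node, so bumping both counters by fs_info->nodesize (to 1M + 16K
 * with 16K nodes) keeps 1/100 accurate without a rescan; any other
 * topology returns 1 and a full rescan is needed.
 */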
3338 /*
3339  * Copy the accounting information between qgroups. This is necessary
3340  * when a snapshot or a subvolume is created. Throwing an error will
3341  * cause a transaction abort so we take extra care here to only error
3342  * when a readonly fs is a reasonable outcome.
3343  */
3344 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3345 			 u64 objectid, u64 inode_rootid,
3346 			 struct btrfs_qgroup_inherit *inherit)
3347 {
3348 	int ret = 0;
3349 	u64 *i_qgroups;
3350 	bool committing = false;
3351 	struct btrfs_fs_info *fs_info = trans->fs_info;
3352 	struct btrfs_root *quota_root;
3353 	struct btrfs_qgroup *srcgroup;
3354 	struct btrfs_qgroup *dstgroup;
3355 	struct btrfs_qgroup *prealloc;
3356 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3357 	bool free_inherit = false;
3358 	bool need_rescan = false;
3359 	u32 level_size = 0;
3360 	u64 nums;
3361 
3362 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3363 	if (!prealloc)
3364 		return -ENOMEM;
3365 
3366 	/*
3367 	 * There are only two callers of this function.
3368 	 *
3369 	 * One in create_subvol() in the ioctl context, which needs to hold
3370 	 * the qgroup_ioctl_lock.
3371 	 *
3372 	 * The other one in create_pending_snapshot() where no other qgroup
3373 	 * code can modify the fs as they all need to either start a new trans
3374 	 * or hold a trans handle, thus we don't need to hold
3375 	 * qgroup_ioctl_lock.
3376 	 * This would avoid long and complex lock chain and make lockdep happy.
3377 	 */
3378 	spin_lock(&fs_info->trans_lock);
3379 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3380 		committing = true;
3381 	spin_unlock(&fs_info->trans_lock);
3382 
3383 	if (!committing)
3384 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3385 	if (!btrfs_qgroup_enabled(fs_info))
3386 		goto out;
3387 
3388 	quota_root = fs_info->quota_root;
3389 	if (!quota_root) {
3390 		ret = -EINVAL;
3391 		goto out;
3392 	}
3393 
3394 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3395 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3396 		if (ret)
3397 			goto out;
3398 		free_inherit = true;
3399 	}
3400 
3401 	if (inherit) {
3402 		i_qgroups = (u64 *)(inherit + 1);
3403 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3404 		       2 * inherit->num_excl_copies;
3405 		for (int i = 0; i < nums; i++) {
3406 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3407 
3408 			/*
3409 			 * Zero out invalid groups so we can ignore
3410 			 * them later.
3411 			 */
3412 			if (!srcgroup ||
3413 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3414 				*i_qgroups = 0ULL;
3415 
3416 			++i_qgroups;
3417 		}
3418 	}
3419 
3420 	/*
3421 	 * create a tracking group for the subvol itself
3422 	 */
3423 	ret = add_qgroup_item(trans, quota_root, objectid);
3424 	if (ret)
3425 		goto out;
3426 
3427 	/*
3428 	 * add qgroup to all inherited groups
3429 	 */
3430 	if (inherit) {
3431 		i_qgroups = (u64 *)(inherit + 1);
3432 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3433 			if (*i_qgroups == 0)
3434 				continue;
3435 			ret = add_qgroup_relation_item(trans, objectid,
3436 						       *i_qgroups);
3437 			if (ret && ret != -EEXIST)
3438 				goto out;
3439 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3440 						       objectid);
3441 			if (ret && ret != -EEXIST)
3442 				goto out;
3443 		}
3444 		ret = 0;
3445 
3446 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3447 					 sizeof(struct btrfs_qgroup_list *),
3448 					 GFP_NOFS);
3449 		if (!qlist_prealloc) {
3450 			ret = -ENOMEM;
3451 			goto out;
3452 		}
3453 		for (int i = 0; i < inherit->num_qgroups; i++) {
3454 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3455 						    GFP_NOFS);
3456 			if (!qlist_prealloc[i]) {
3457 				ret = -ENOMEM;
3458 				goto out;
3459 			}
3460 		}
3461 	}
3462 
3463 	spin_lock(&fs_info->qgroup_lock);
3464 
3465 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3466 	prealloc = NULL;
3467 
3468 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3469 		dstgroup->lim_flags = inherit->lim.flags;
3470 		dstgroup->max_rfer = inherit->lim.max_rfer;
3471 		dstgroup->max_excl = inherit->lim.max_excl;
3472 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3473 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3474 
3475 		qgroup_dirty(fs_info, dstgroup);
3476 	}
3477 
3478 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3479 		srcgroup = find_qgroup_rb(fs_info, srcid);
3480 		if (!srcgroup)
3481 			goto unlock;
3482 
3483 		/*
3484 		 * We call inherit after we clone the root in order to make sure
3485 		 * our counts don't go crazy, so at this point the only
3486 		 * difference between the two roots should be the root node.
3487 		 */
3488 		level_size = fs_info->nodesize;
3489 		dstgroup->rfer = srcgroup->rfer;
3490 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3491 		dstgroup->excl = level_size;
3492 		dstgroup->excl_cmpr = level_size;
3493 		srcgroup->excl = level_size;
3494 		srcgroup->excl_cmpr = level_size;
3495 
3496 		/* inherit the limit info */
3497 		dstgroup->lim_flags = srcgroup->lim_flags;
3498 		dstgroup->max_rfer = srcgroup->max_rfer;
3499 		dstgroup->max_excl = srcgroup->max_excl;
3500 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3501 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3502 
3503 		qgroup_dirty(fs_info, dstgroup);
3504 		qgroup_dirty(fs_info, srcgroup);
3505 
3506 		/*
3507 	 * If the source qgroup has a parent but the new one doesn't,
3508 		 * we need a full rescan.
3509 		 */
3510 		if (!inherit && !list_empty(&srcgroup->groups))
3511 			need_rescan = true;
3512 	}
3513 
3514 	if (!inherit)
3515 		goto unlock;
3516 
3517 	i_qgroups = (u64 *)(inherit + 1);
3518 	for (int i = 0; i < inherit->num_qgroups; i++) {
3519 		if (*i_qgroups) {
3520 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3521 					      *i_qgroups);
3522 			qlist_prealloc[i] = NULL;
3523 			if (ret)
3524 				goto unlock;
3525 		}
3526 		if (srcid) {
3527 			/* Check if we can do a quick inherit. */
3528 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3529 			if (ret < 0)
3530 				goto unlock;
3531 			if (ret > 0)
3532 				need_rescan = true;
3533 			ret = 0;
3534 		}
3535 		++i_qgroups;
3536 	}
3537 
3538 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3539 		struct btrfs_qgroup *src;
3540 		struct btrfs_qgroup *dst;
3541 
3542 		if (!i_qgroups[0] || !i_qgroups[1])
3543 			continue;
3544 
3545 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3546 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3547 
3548 		if (!src || !dst) {
3549 			ret = -EINVAL;
3550 			goto unlock;
3551 		}
3552 
3553 		dst->rfer = src->rfer - level_size;
3554 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3555 
3556 		/* Manually tweaking numbers certainly needs a rescan */
3557 		need_rescan = true;
3558 	}
3559 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3560 		struct btrfs_qgroup *src;
3561 		struct btrfs_qgroup *dst;
3562 
3563 		if (!i_qgroups[0] || !i_qgroups[1])
3564 			continue;
3565 
3566 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3567 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3568 
3569 		if (!src || !dst) {
3570 			ret = -EINVAL;
3571 			goto unlock;
3572 		}
3573 
3574 		dst->excl = src->excl + level_size;
3575 		dst->excl_cmpr = src->excl_cmpr + level_size;
3576 		need_rescan = true;
3577 	}
3578 
3579 unlock:
3580 	spin_unlock(&fs_info->qgroup_lock);
3581 	if (!ret)
3582 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3583 out:
3584 	if (!committing)
3585 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3586 	if (need_rescan)
3587 		qgroup_mark_inconsistent(fs_info);
3588 	if (qlist_prealloc) {
3589 		for (int i = 0; i < inherit->num_qgroups; i++)
3590 			kfree(qlist_prealloc[i]);
3591 		kfree(qlist_prealloc);
3592 	}
3593 	if (free_inherit)
3594 		kfree(inherit);
3595 	kfree(prealloc);
3596 	return ret;
3597 }
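
/*
 * Illustrative note, not part of the original file: layout of the
 * variable-length u64 array that follows struct btrfs_qgroup_inherit,
 * as walked through i_qgroups above:
 *
 *	[ num_qgroups ids         ]  qgroups the new qgroup is added to
 *	[ 2 * num_ref_copies ids  ]  (src, dst) pairs for rfer copies
 *	[ 2 * num_excl_copies ids ]  (src, dst) pairs for excl copies
 */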
3598 
3599 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3600 {
3601 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3602 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3603 		return false;
3604 
3605 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3606 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3607 		return false;
3608 
3609 	return true;
3610 }
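
/*
 * Worked example, illustrative only (the numbers are hypothetical): with
 * max_rfer = 1024K, rfer = 768K and 128K already reserved, a new 256K
 * reservation fails the check above because
 *	128K (rsv) + 768K (rfer) + 256K (new) = 1152K > 1024K (max_rfer).
 */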
3611 
3612 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3613 			  enum btrfs_qgroup_rsv_type type)
3614 {
3615 	struct btrfs_qgroup *qgroup;
3616 	struct btrfs_fs_info *fs_info = root->fs_info;
3617 	u64 ref_root = btrfs_root_id(root);
3618 	int ret = 0;
3619 	LIST_HEAD(qgroup_list);
3620 
3621 	if (!is_fstree(ref_root))
3622 		return 0;
3623 
3624 	if (num_bytes == 0)
3625 		return 0;
3626 
3627 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3628 	    capable(CAP_SYS_RESOURCE))
3629 		enforce = false;
3630 
3631 	spin_lock(&fs_info->qgroup_lock);
3632 	if (!fs_info->quota_root)
3633 		goto out;
3634 
3635 	qgroup = find_qgroup_rb(fs_info, ref_root);
3636 	if (!qgroup)
3637 		goto out;
3638 
3639 	qgroup_iterator_add(&qgroup_list, qgroup);
3640 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3641 		struct btrfs_qgroup_list *glist;
3642 
3643 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3644 			ret = -EDQUOT;
3645 			goto out;
3646 		}
3647 
3648 		list_for_each_entry(glist, &qgroup->groups, next_group)
3649 			qgroup_iterator_add(&qgroup_list, glist->group);
3650 	}
3651 
3652 	ret = 0;
3653 	/*
3654 	 * no limits exceeded, now record the reservation into all qgroups
3655 	 */
3656 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3657 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3658 
3659 out:
3660 	qgroup_iterator_clean(&qgroup_list);
3661 	spin_unlock(&fs_info->qgroup_lock);
3662 	return ret;
3663 }
3664 
3665 /*
3666  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3667  * qgroup).
3668  *
3669  * Will handle all higher level qgroups too.
3670  *
3671  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3672  * This special case is only used for META_PERTRANS type.
3673  */
3674 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3675 			       u64 ref_root, u64 num_bytes,
3676 			       enum btrfs_qgroup_rsv_type type)
3677 {
3678 	struct btrfs_qgroup *qgroup;
3679 	LIST_HEAD(qgroup_list);
3680 
3681 	if (!is_fstree(ref_root))
3682 		return;
3683 
3684 	if (num_bytes == 0)
3685 		return;
3686 
3687 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3688 		WARN(1, "%s: Invalid type to free", __func__);
3689 		return;
3690 	}
3691 	spin_lock(&fs_info->qgroup_lock);
3692 
3693 	if (!fs_info->quota_root)
3694 		goto out;
3695 
3696 	qgroup = find_qgroup_rb(fs_info, ref_root);
3697 	if (!qgroup)
3698 		goto out;
3699 
3700 	if (num_bytes == (u64)-1)
3701 		/*
3702 		 * We're freeing all pertrans rsv, so get the reserved value
3703 		 * from the level 0 qgroup as the real num_bytes to free.
3704 		 */
3705 		num_bytes = qgroup->rsv.values[type];
3706 
3707 	qgroup_iterator_add(&qgroup_list, qgroup);
3708 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3709 		struct btrfs_qgroup_list *glist;
3710 
3711 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3712 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3713 			qgroup_iterator_add(&qgroup_list, glist->group);
3714 		}
3715 	}
3716 out:
3717 	qgroup_iterator_clean(&qgroup_list);
3718 	spin_unlock(&fs_info->qgroup_lock);
3719 }
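
/*
 * Illustrative sketch, not part of the original file: the two typical
 * call patterns for btrfs_qgroup_free_refroot().  The helper name and
 * the byte count are hypothetical.
 */
static inline void example_free_refroot(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	/* Free an explicit amount of data reservation. */
	btrfs_qgroup_free_refroot(fs_info, root_id, SZ_1M,
				  BTRFS_QGROUP_RSV_DATA);
	/* Free ALL pertrans reservation; (u64)-1 is only valid here. */
	btrfs_qgroup_free_refroot(fs_info, root_id, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}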
3720 
3721 /*
3722  * Check if the leaf is the last leaf, which means all node pointers
3723  * are at their last position.
3724  */
3725 static bool is_last_leaf(struct btrfs_path *path)
3726 {
3727 	int i;
3728 
3729 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3730 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3731 			return false;
3732 	}
3733 	return true;
3734 }
3735 
3736 /*
3737  * Returns <0 on error, 0 when more leaves are to be scanned.
3738  * Returns 1 when done.
3739  */
3740 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3741 			      struct btrfs_path *path)
3742 {
3743 	struct btrfs_fs_info *fs_info = trans->fs_info;
3744 	struct btrfs_root *extent_root;
3745 	struct btrfs_key found;
3746 	struct extent_buffer *scratch_leaf = NULL;
3747 	u64 num_bytes;
3748 	bool done;
3749 	int slot;
3750 	int ret;
3751 
3752 	if (!btrfs_qgroup_full_accounting(fs_info))
3753 		return 1;
3754 
3755 	mutex_lock(&fs_info->qgroup_rescan_lock);
3756 	extent_root = btrfs_extent_root(fs_info,
3757 				fs_info->qgroup_rescan_progress.objectid);
3758 	ret = btrfs_search_slot_for_read(extent_root,
3759 					 &fs_info->qgroup_rescan_progress,
3760 					 path, 1, 0);
3761 
3762 	btrfs_debug(fs_info,
3763 		"current progress key (%llu %u %llu), search_slot ret %d",
3764 		fs_info->qgroup_rescan_progress.objectid,
3765 		fs_info->qgroup_rescan_progress.type,
3766 		fs_info->qgroup_rescan_progress.offset, ret);
3767 
3768 	if (ret) {
3769 		/*
3770 		 * The rescan is about to end, we will not be scanning any
3771 		 * further blocks. We cannot unset the RESCAN flag here, because
3772 		 * we want to commit the transaction if everything went well.
3773 		 * To make the live accounting work in this phase, we set our
3774 		 * scan progress pointer such that every real extent objectid
3775 		 * will be smaller.
3776 		 */
3777 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3778 		btrfs_release_path(path);
3779 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3780 		return ret;
3781 	}
3782 	done = is_last_leaf(path);
3783 
3784 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3785 			      btrfs_header_nritems(path->nodes[0]) - 1);
3786 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3787 
3788 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3789 	if (!scratch_leaf) {
3790 		ret = -ENOMEM;
3791 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3792 		goto out;
3793 	}
3794 	slot = path->slots[0];
3795 	btrfs_release_path(path);
3796 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3797 
3798 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3799 		struct btrfs_backref_walk_ctx ctx = { 0 };
3800 
3801 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3802 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3803 		    found.type != BTRFS_METADATA_ITEM_KEY)
3804 			continue;
3805 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3806 			num_bytes = fs_info->nodesize;
3807 		else
3808 			num_bytes = found.offset;
3809 
3810 		ctx.bytenr = found.objectid;
3811 		ctx.fs_info = fs_info;
3812 
3813 		ret = btrfs_find_all_roots(&ctx, false);
3814 		if (ret < 0)
3815 			goto out;
3816 		/* For rescan, just pass old_roots as NULL */
3817 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3818 						  num_bytes, NULL, ctx.roots);
3819 		if (ret < 0)
3820 			goto out;
3821 	}
3822 out:
3823 	if (scratch_leaf)
3824 		free_extent_buffer(scratch_leaf);
3825 
3826 	if (done && !ret) {
3827 		ret = 1;
3828 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3829 	}
3830 	return ret;
3831 }
3832 
3833 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3834 {
3835 	if (btrfs_fs_closing(fs_info))
3836 		return true;
3837 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3838 		return true;
3839 	if (!btrfs_qgroup_enabled(fs_info))
3840 		return true;
3841 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3842 		return true;
3843 	return false;
3844 }
3845 
3846 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3847 {
3848 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3849 						     qgroup_rescan_work);
3850 	struct btrfs_path *path;
3851 	struct btrfs_trans_handle *trans = NULL;
3852 	int ret = 0;
3853 	bool stopped = false;
3854 	bool did_leaf_rescans = false;
3855 
3856 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3857 		return;
3858 
3859 	path = btrfs_alloc_path();
3860 	if (!path) {
3861 		ret = -ENOMEM;
3862 		goto out;
3863 	}
3864 	/*
3865 	 * Rescan should only search the commit root, and any later difference
3866 	 * should be recorded by qgroup accounting.
3867 	 */
3868 	path->search_commit_root = 1;
3869 	path->skip_locking = 1;
3870 
3871 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3872 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3873 		if (IS_ERR(trans)) {
3874 			ret = PTR_ERR(trans);
3875 			break;
3876 		}
3877 
3878 		ret = qgroup_rescan_leaf(trans, path);
3879 		did_leaf_rescans = true;
3880 
3881 		if (ret > 0)
3882 			btrfs_commit_transaction(trans);
3883 		else
3884 			btrfs_end_transaction(trans);
3885 	}
3886 
3887 out:
3888 	btrfs_free_path(path);
3889 
3890 	mutex_lock(&fs_info->qgroup_rescan_lock);
3891 	if (ret > 0 &&
3892 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3893 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3894 	} else if (ret < 0 || stopped) {
3895 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3896 	}
3897 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3898 
3899 	/*
3900 	 * Only update status, since the previous part has already updated the
3901 	 * qgroup info, and only if we did any actual work. This also prevents
3902 	 * race with a concurrent quota disable, which has already set
3903 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3904 	 * btrfs_quota_disable().
3905 	 */
3906 	if (did_leaf_rescans) {
3907 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3908 		if (IS_ERR(trans)) {
3909 			ret = PTR_ERR(trans);
3910 			trans = NULL;
3911 			btrfs_err(fs_info,
3912 				  "fail to start transaction for status update: %d",
3913 				  ret);
3914 		}
3915 	} else {
3916 		trans = NULL;
3917 	}
3918 
3919 	mutex_lock(&fs_info->qgroup_rescan_lock);
3920 	if (!stopped ||
3921 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3922 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3923 	if (trans) {
3924 		int ret2 = update_qgroup_status_item(trans);
3925 
3926 		if (ret2 < 0) {
3927 			ret = ret2;
3928 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3929 		}
3930 	}
3931 	fs_info->qgroup_rescan_running = false;
3932 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3933 	complete_all(&fs_info->qgroup_rescan_completion);
3934 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3935 
3936 	if (!trans)
3937 		return;
3938 
3939 	btrfs_end_transaction(trans);
3940 
3941 	if (stopped) {
3942 		btrfs_info(fs_info, "qgroup scan paused");
3943 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3944 		btrfs_info(fs_info, "qgroup scan cancelled");
3945 	} else if (ret >= 0) {
3946 		btrfs_info(fs_info, "qgroup scan completed%s",
3947 			ret > 0 ? " (inconsistency flag cleared)" : "");
3948 	} else {
3949 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3950 	}
3951 }
3952 
3953 /*
3954  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3955  * memory required for the rescan context.
3956  */
3957 static int
3958 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3959 		   int init_flags)
3960 {
3961 	int ret = 0;
3962 
3963 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3964 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3965 		return -EINVAL;
3966 	}
3967 
3968 	if (!init_flags) {
3969 		/* we're resuming qgroup rescan at mount time */
3970 		if (!(fs_info->qgroup_flags &
3971 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3972 			btrfs_debug(fs_info,
3973 			"qgroup rescan init failed, qgroup rescan is not queued");
3974 			ret = -EINVAL;
3975 		} else if (!(fs_info->qgroup_flags &
3976 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3977 			btrfs_debug(fs_info,
3978 			"qgroup rescan init failed, qgroup is not enabled");
3979 			ret = -ENOTCONN;
3980 		}
3981 
3982 		if (ret)
3983 			return ret;
3984 	}
3985 
3986 	mutex_lock(&fs_info->qgroup_rescan_lock);
3987 
3988 	if (init_flags) {
3989 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3990 			ret = -EINPROGRESS;
3991 		} else if (!(fs_info->qgroup_flags &
3992 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3993 			btrfs_debug(fs_info,
3994 			"qgroup rescan init failed, qgroup is not enabled");
3995 			ret = -ENOTCONN;
3996 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3997 			/* Quota disable is in progress */
3998 			ret = -EBUSY;
3999 		}
4000 
4001 		if (ret) {
4002 			mutex_unlock(&fs_info->qgroup_rescan_lock);
4003 			return ret;
4004 		}
4005 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4006 	}
4007 
4008 	memset(&fs_info->qgroup_rescan_progress, 0,
4009 		sizeof(fs_info->qgroup_rescan_progress));
4010 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
4011 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
4012 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
4013 	init_completion(&fs_info->qgroup_rescan_completion);
4014 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4015 
4016 	btrfs_init_work(&fs_info->qgroup_rescan_work,
4017 			btrfs_qgroup_rescan_worker, NULL);
4018 	return 0;
4019 }
4020 
4021 static void
4022 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
4023 {
4024 	struct rb_node *n;
4025 	struct btrfs_qgroup *qgroup;
4026 
4027 	spin_lock(&fs_info->qgroup_lock);
4028 	/* clear all current qgroup tracking information */
4029 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
4030 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
4031 		qgroup->rfer = 0;
4032 		qgroup->rfer_cmpr = 0;
4033 		qgroup->excl = 0;
4034 		qgroup->excl_cmpr = 0;
4035 		qgroup_dirty(fs_info, qgroup);
4036 	}
4037 	spin_unlock(&fs_info->qgroup_lock);
4038 }
4039 
4040 int
4041 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
4042 {
4043 	int ret = 0;
4044 
4045 	ret = qgroup_rescan_init(fs_info, 0, 1);
4046 	if (ret)
4047 		return ret;
4048 
4049 	/*
4050 	 * We have set the rescan_progress to 0, which means no more
4051 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4052 	 * However, btrfs_qgroup_account_ref may be running right after its
4053 	 * call to btrfs_find_all_roots, in which case it would still do the
4054 	 * accounting.
4055 	 * To solve this, we're committing the transaction, which will
4056 	 * ensure we run all delayed refs and only after that, we are
4057 	 * going to clear all tracking information for a clean start.
4058 	 */
4059 
4060 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4061 	if (ret) {
4062 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4063 		return ret;
4064 	}
4065 
4066 	qgroup_rescan_zero_tracking(fs_info);
4067 
4068 	mutex_lock(&fs_info->qgroup_rescan_lock);
4069 	/*
4070 	 * The rescan worker is only for full accounting qgroups, check if it's
4071 	 * enabled as it is pointless to queue it otherwise. A concurrent quota
4072 	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
4073 	 */
4074 	if (btrfs_qgroup_full_accounting(fs_info)) {
4075 		fs_info->qgroup_rescan_running = true;
4076 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4077 				 &fs_info->qgroup_rescan_work);
4078 	} else {
4079 		ret = -ENOTCONN;
4080 	}
4081 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4082 
4083 	return ret;
4084 }
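
/*
 * Illustrative note, not part of the original file: a typical caller such
 * as the rescan ioctl path would kick the scan and optionally wait:
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info, true);
 */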
4085 
4086 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4087 				     bool interruptible)
4088 {
4089 	int running;
4090 	int ret = 0;
4091 
4092 	mutex_lock(&fs_info->qgroup_rescan_lock);
4093 	running = fs_info->qgroup_rescan_running;
4094 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4095 
4096 	if (!running)
4097 		return 0;
4098 
4099 	if (interruptible)
4100 		ret = wait_for_completion_interruptible(
4101 					&fs_info->qgroup_rescan_completion);
4102 	else
4103 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4104 
4105 	return ret;
4106 }
4107 
4108 /*
4109  * This is only called from open_ctree() where we're still single threaded, thus
4110  * locking is omitted here.
4111  */
4112 void
4113 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4114 {
4115 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4116 		mutex_lock(&fs_info->qgroup_rescan_lock);
4117 		fs_info->qgroup_rescan_running = true;
4118 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4119 				 &fs_info->qgroup_rescan_work);
4120 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4121 	}
4122 }
4123 
4124 #define rbtree_iterate_from_safe(node, next, start)				\
4125        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
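
/*
 * Illustrative note, not part of the original file: the macro above walks
 * an rbtree from @start while allowing the current node to be erased,
 * e.g. as used in qgroup_unreserve_range() below:
 *
 *	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
 *		entry = rb_entry(node, struct ulist_node, rb_node);
 *		... may safely ulist_del() the current entry ...
 *	}
 */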
4126 
4127 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4128 				  struct extent_changeset *reserved, u64 start,
4129 				  u64 len)
4130 {
4131 	struct rb_node *node;
4132 	struct rb_node *next;
4133 	struct ulist_node *entry;
4134 	int ret = 0;
4135 
4136 	node = reserved->range_changed.root.rb_node;
4137 	if (!node)
4138 		return 0;
4139 	while (node) {
4140 		entry = rb_entry(node, struct ulist_node, rb_node);
4141 		if (entry->val < start)
4142 			node = node->rb_right;
4143 		else
4144 			node = node->rb_left;
4145 	}
4146 
4147 	if (entry->val > start && rb_prev(&entry->rb_node))
4148 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4149 				 rb_node);
4150 
4151 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4152 		u64 entry_start;
4153 		u64 entry_end;
4154 		u64 entry_len;
4155 		int clear_ret;
4156 
4157 		entry = rb_entry(node, struct ulist_node, rb_node);
4158 		entry_start = entry->val;
4159 		entry_end = entry->aux;
4160 		entry_len = entry_end - entry_start + 1;
4161 
4162 		if (entry_start >= start + len)
4163 			break;
4164 		if (entry_start + entry_len <= start)
4165 			continue;
4166 		/*
4167 		 * Now the entry overlaps [start, start + len), clear the
4168 		 * EXTENT_QGROUP_RESERVED bit.
4169 		 */
4170 		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
4171 					      entry_end, EXTENT_QGROUP_RESERVED);
4172 		if (!ret && clear_ret < 0)
4173 			ret = clear_ret;
4174 
4175 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4176 		if (likely(reserved->bytes_changed >= entry_len)) {
4177 			reserved->bytes_changed -= entry_len;
4178 		} else {
4179 			WARN_ON(1);
4180 			reserved->bytes_changed = 0;
4181 		}
4182 	}
4183 
4184 	return ret;
4185 }
4186 
4187 /*
4188  * Try to free some space for qgroup.
4189  *
4190  * For qgroup, there are only 3 ways to free qgroup space:
4191  * - Flush nodatacow write
4192  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4193  *   In theory, we should only flush nodatacow inodes, but it's not yet
4194  *   possible, so we need to flush the whole root.
4195  *
4196  * - Wait for ordered extents
4197  *   When ordered extents are finished, their reserved metadata is finally
4198  *   converted to pertrans status, which can be freed by a later
4199  *   transaction commit.
4200  *
4201  * - Commit transaction
4202  *   This would free the meta_pertrans space.
4203  *   In theory this shouldn't provide much space, but every bit of freed
4204  *   qgroup space helps.
4205  */
4206 static int try_flush_qgroup(struct btrfs_root *root)
4207 {
4208 	int ret;
4209 
4210 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4211 	ASSERT(current->journal_info == NULL);
4212 	if (WARN_ON(current->journal_info))
4213 		return 0;
4214 
4215 	/*
4216 	 * We don't want to run flush again and again, so if there is a running
4217 	 * one, we wait for it to finish instead of starting a new flush.
4218 	 */
4219 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4220 		wait_event(root->qgroup_flush_wait,
4221 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4222 		return 0;
4223 	}
4224 
4225 	btrfs_run_delayed_iputs(root->fs_info);
4226 	btrfs_wait_on_delayed_iputs(root->fs_info);
4227 	ret = btrfs_start_delalloc_snapshot(root, true);
4228 	if (ret < 0)
4229 		goto out;
4230 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4231 
4232 	ret = btrfs_commit_current_transaction(root);
4233 out:
4234 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4235 	wake_up(&root->qgroup_flush_wait);
4236 	return ret;
4237 }
4238 
4239 static int qgroup_reserve_data(struct btrfs_inode *inode,
4240 			struct extent_changeset **reserved_ret, u64 start,
4241 			u64 len)
4242 {
4243 	struct btrfs_root *root = inode->root;
4244 	struct extent_changeset *reserved;
4245 	bool new_reserved = false;
4246 	u64 orig_reserved;
4247 	u64 to_reserve;
4248 	int ret;
4249 
4250 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4251 	    !is_fstree(btrfs_root_id(root)) || len == 0)
4252 		return 0;
4253 
4254 	/* @reserved parameter is mandatory for qgroup */
4255 	if (WARN_ON(!reserved_ret))
4256 		return -EINVAL;
4257 	if (!*reserved_ret) {
4258 		new_reserved = true;
4259 		*reserved_ret = extent_changeset_alloc();
4260 		if (!*reserved_ret)
4261 			return -ENOMEM;
4262 	}
4263 	reserved = *reserved_ret;
4264 	/* Record already reserved space */
4265 	orig_reserved = reserved->bytes_changed;
4266 	ret = set_record_extent_bits(&inode->io_tree, start,
4267 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
4268 
4269 	/* Newly reserved space */
4270 	to_reserve = reserved->bytes_changed - orig_reserved;
4271 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4272 					to_reserve, QGROUP_RESERVE);
4273 	if (ret < 0)
4274 		goto out;
4275 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4276 	if (ret < 0)
4277 		goto cleanup;
4278 
4279 	return ret;
4280 
4281 cleanup:
4282 	qgroup_unreserve_range(inode, reserved, start, len);
4283 out:
4284 	if (new_reserved) {
4285 		extent_changeset_free(reserved);
4286 		*reserved_ret = NULL;
4287 	}
4288 	return ret;
4289 }
4290 
4291 /*
4292  * Reserve qgroup space for range [start, start + len).
4293  *
4294  * This function will either reserve space from related qgroups or do nothing
4295  * if the range is already reserved.
4296  *
4297  * Return 0 for successful reservation
4298  * Return <0 for error (including -EDQUOT)
4299  *
4300  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4301  *	 transaction commit, so the caller should not hold any locked dirty pages.
4302  */
4303 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4304 			struct extent_changeset **reserved_ret, u64 start,
4305 			u64 len)
4306 {
4307 	int ret;
4308 
4309 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4310 	if (ret <= 0 && ret != -EDQUOT)
4311 		return ret;
4312 
4313 	ret = try_flush_qgroup(inode->root);
4314 	if (ret < 0)
4315 		return ret;
4316 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4317 }
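
/*
 * Illustrative sketch, not part of the original file: a write path
 * reserving qgroup data space before dirtying pages.  The helper name
 * example_prepare_write() is hypothetical.
 */
static inline int example_prepare_write(struct btrfs_inode *inode,
					u64 start, u64 len)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
	if (ret < 0)		/* e.g. -EDQUOT even after flushing */
		return ret;
	/* ... dirty pages, create the ordered extent, etc ... */
	/*
	 * On error the reservation would be returned with:
	 * btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
	 */
	extent_changeset_free(reserved);
	return 0;
}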
4318 
4319 /* Free ranges specified by @reserved, normally in error path */
4320 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4321 				     struct extent_changeset *reserved,
4322 				     u64 start, u64 len, u64 *freed_ret)
4323 {
4324 	struct btrfs_root *root = inode->root;
4325 	struct ulist_node *unode;
4326 	struct ulist_iterator uiter;
4327 	struct extent_changeset changeset;
4328 	u64 freed = 0;
4329 	int ret;
4330 
4331 	extent_changeset_init(&changeset);
4332 	len = round_up(start + len, root->fs_info->sectorsize);
4333 	start = round_down(start, root->fs_info->sectorsize);
4334 
4335 	ULIST_ITER_INIT(&uiter);
4336 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4337 		u64 range_start = unode->val;
4338 		/* unode->aux is the inclusive end */
4339 		u64 range_len = unode->aux - range_start + 1;
4340 		u64 free_start;
4341 		u64 free_len;
4342 
4343 		extent_changeset_release(&changeset);
4344 
4345 		/* Only free range in range [start, start + len) */
4346 		if (range_start >= start + len ||
4347 		    range_start + range_len <= start)
4348 			continue;
4349 		free_start = max(range_start, start);
4350 		free_len = min(start + len, range_start + range_len) -
4351 			   free_start;
4352 		/*
4353 		 * TODO: also modify reserved->ranges_reserved to reflect
4354 		 * the modification.
4355 		 *
4356 		 * However, as long as we free qgroup reserved space according
4357 		 * to EXTENT_QGROUP_RESERVED, we won't double free.
4358 		 * So there is no need to rush.
4359 		 */
4360 		ret = clear_record_extent_bits(&inode->io_tree, free_start,
4361 				free_start + free_len - 1,
4362 				EXTENT_QGROUP_RESERVED, &changeset);
4363 		if (ret < 0)
4364 			goto out;
4365 		freed += changeset.bytes_changed;
4366 	}
4367 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4368 				  BTRFS_QGROUP_RSV_DATA);
4369 	if (freed_ret)
4370 		*freed_ret = freed;
4371 	ret = 0;
4372 out:
4373 	extent_changeset_release(&changeset);
4374 	return ret;
4375 }
4376 
4377 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4378 			struct extent_changeset *reserved, u64 start, u64 len,
4379 			u64 *released, int free)
4380 {
4381 	struct extent_changeset changeset;
4382 	int trace_op = QGROUP_RELEASE;
4383 	int ret;
4384 
4385 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4386 		return clear_record_extent_bits(&inode->io_tree, start,
4387 						start + len - 1,
4388 						EXTENT_QGROUP_RESERVED, NULL);
4389 	}
4390 
4391 	/* In release case, we shouldn't have @reserved */
4392 	WARN_ON(!free && reserved);
4393 	if (free && reserved)
4394 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4395 	extent_changeset_init(&changeset);
4396 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
4397 				       EXTENT_QGROUP_RESERVED, &changeset);
4398 	if (ret < 0)
4399 		goto out;
4400 
4401 	if (free)
4402 		trace_op = QGROUP_FREE;
4403 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4404 					changeset.bytes_changed, trace_op);
4405 	if (free)
4406 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4407 				btrfs_root_id(inode->root),
4408 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4409 	if (released)
4410 		*released = changeset.bytes_changed;
4411 out:
4412 	extent_changeset_release(&changeset);
4413 	return ret;
4414 }
4415 
4416 /*
4417  * Free a reserved space range from io_tree and related qgroups
4418  *
4419  * Should be called when a range of pages get invalidated before reaching disk.
4420  * Or for error cleanup case.
4421  * if @reserved is given, only reserved range in [@start, @start + @len) will
4422  * be freed.
4423  *
4424  * For data written to disk, use btrfs_qgroup_release_data().
4425  *
4426  * NOTE: This function may sleep for memory allocation.
4427  */
4428 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4429 			   struct extent_changeset *reserved,
4430 			   u64 start, u64 len, u64 *freed)
4431 {
4432 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4433 }
4434 
4435 /*
4436  * Release a reserved space range from io_tree only.
4437  *
4438  * Should be called when a range of pages get written to disk and corresponding
4439  * FILE_EXTENT is inserted into corresponding root.
4440  *
4441  * Since the new qgroup accounting framework only updates qgroup numbers at
4442  * commit_transaction() time, its reserved space shouldn't be freed from
4443  * related qgroups.
4444  *
4445  * But we should release the range from the io_tree, to allow further writes
4446  * to be COWed.
4447  *
4448  * NOTE: This function may sleep for memory allocation.
4449  */
4450 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4451 {
4452 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4453 }
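
/*
 * Illustrative note, not part of the original file: once an ordered
 * extent finishes and the file extent item is on disk, the range is
 * released (it stays accounted in the qgroup):
 *
 *	btrfs_qgroup_release_data(inode, start, len, &released);
 *
 * whereas an invalidated or failed range is freed back to the qgroup:
 *
 *	btrfs_qgroup_free_data(inode, reserved, start, len, &freed);
 */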
4454 
4455 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4456 			      enum btrfs_qgroup_rsv_type type)
4457 {
4458 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4459 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4460 		return;
4461 	if (num_bytes == 0)
4462 		return;
4463 
4464 	spin_lock(&root->qgroup_meta_rsv_lock);
4465 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4466 		root->qgroup_meta_rsv_prealloc += num_bytes;
4467 	else
4468 		root->qgroup_meta_rsv_pertrans += num_bytes;
4469 	spin_unlock(&root->qgroup_meta_rsv_lock);
4470 }
4471 
4472 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4473 			     enum btrfs_qgroup_rsv_type type)
4474 {
4475 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4476 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4477 		return 0;
4478 	if (num_bytes == 0)
4479 		return 0;
4480 
4481 	spin_lock(&root->qgroup_meta_rsv_lock);
4482 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4483 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4484 				  num_bytes);
4485 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4486 	} else {
4487 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4488 				  num_bytes);
4489 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4490 	}
4491 	spin_unlock(&root->qgroup_meta_rsv_lock);
4492 	return num_bytes;
4493 }
4494 
4495 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4496 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4497 {
4498 	struct btrfs_fs_info *fs_info = root->fs_info;
4499 	int ret;
4500 
4501 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4502 	    !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4503 		return 0;
4504 
4505 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4506 	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4507 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4508 	if (ret < 0)
4509 		return ret;
4510 	/*
4511 	 * Record what we have reserved into the root.
4512 	 *
4513 	 * This avoids an underflow when quota is disabled and then re-enabled:
4514 	 * in that case we may try to free space we haven't reserved
4515 	 * (since quota was disabled), so record what we reserved into the root
4516 	 * and ensure a later release won't underflow this number.
4517 	 */
4518 	add_root_meta_rsv(root, num_bytes, type);
4519 	return ret;
4520 }
4521 
4522 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4523 				enum btrfs_qgroup_rsv_type type, bool enforce,
4524 				bool noflush)
4525 {
4526 	int ret;
4527 
4528 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4529 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4530 		return ret;
4531 
4532 	ret = try_flush_qgroup(root);
4533 	if (ret < 0)
4534 		return ret;
4535 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4536 }
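
/*
 * Illustrative sketch, not part of the original file: the typical
 * PREALLOC meta reservation lifecycle.  The helper name is hypothetical
 * and @nr_bytes is assumed to be nodesize aligned.
 */
static inline int example_start_op(struct btrfs_root *root, int nr_bytes)
{
	int ret;

	/* Reserve PREALLOC space up front, flushing on -EDQUOT. */
	ret = __btrfs_qgroup_reserve_meta(root, nr_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC,
					  true, false);
	if (ret < 0)
		return ret;
	/* ... join a transaction and consume the reservation ... */
	/* Convert the used part to PERTRANS, freed at commit time. */
	btrfs_qgroup_convert_reserved_meta(root, nr_bytes);
	return 0;
}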
4537 
4538 /*
4539  * Per-transaction meta reservation should be all freed at transaction commit
4540  * time
4541  */
4542 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4543 {
4544 	struct btrfs_fs_info *fs_info = root->fs_info;
4545 
4546 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4547 	    !is_fstree(btrfs_root_id(root)))
4548 		return;
4549 
4550 	/* TODO: Update trace point to handle such free */
4551 	trace_qgroup_meta_free_all_pertrans(root);
4552 	/* Special value -1 means to free all reserved space */
4553 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4554 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4555 }
4556 
4557 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4558 			      enum btrfs_qgroup_rsv_type type)
4559 {
4560 	struct btrfs_fs_info *fs_info = root->fs_info;
4561 
4562 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4563 	    !is_fstree(btrfs_root_id(root)))
4564 		return;
4565 
4566 	/*
4567 	 * A reservation for META_PREALLOC can happen before quota is enabled,
4568 	 * which can lead to underflow.
4569 	 * Here we ensure we only free what we have really reserved.
4570 	 */
4571 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4572 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4573 	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4574 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4575 }
4576 
4577 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4578 				int num_bytes)
4579 {
4580 	struct btrfs_qgroup *qgroup;
4581 	LIST_HEAD(qgroup_list);
4582 
4583 	if (num_bytes == 0)
4584 		return;
4585 	if (!fs_info->quota_root)
4586 		return;
4587 
4588 	spin_lock(&fs_info->qgroup_lock);
4589 	qgroup = find_qgroup_rb(fs_info, ref_root);
4590 	if (!qgroup)
4591 		goto out;
4592 
4593 	qgroup_iterator_add(&qgroup_list, qgroup);
4594 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4595 		struct btrfs_qgroup_list *glist;
4596 
4597 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4598 				BTRFS_QGROUP_RSV_META_PREALLOC);
4599 		if (!sb_rdonly(fs_info->sb))
4600 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4601 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4602 
4603 		list_for_each_entry(glist, &qgroup->groups, next_group)
4604 			qgroup_iterator_add(&qgroup_list, glist->group);
4605 	}
4606 out:
4607 	qgroup_iterator_clean(&qgroup_list);
4608 	spin_unlock(&fs_info->qgroup_lock);
4609 }
4610 
4611 /*
4612  * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4613  *
4614  * This is called when preallocated meta reservation needs to be used.
4615  * Normally after btrfs_join_transaction() call.
4616  */
4617 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4618 {
4619 	struct btrfs_fs_info *fs_info = root->fs_info;
4620 
4621 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4622 	    !is_fstree(btrfs_root_id(root)))
4623 		return;
4624 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4625 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4626 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4627 	trace_qgroup_meta_convert(root, num_bytes);
4628 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4629 	if (!sb_rdonly(fs_info->sb))
4630 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4631 }
4632 
4633 /*
4634  * Check for leaked qgroup reserved space, normally at inode destruction
4635  * time.
4636  */
4637 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4638 {
4639 	struct extent_changeset changeset;
4640 	struct ulist_node *unode;
4641 	struct ulist_iterator iter;
4642 	int ret;
4643 
4644 	extent_changeset_init(&changeset);
4645 	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4646 			EXTENT_QGROUP_RESERVED, &changeset);
4647 
4648 	WARN_ON(ret < 0);
4649 	if (WARN_ON(changeset.bytes_changed)) {
4650 		ULIST_ITER_INIT(&iter);
4651 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4652 			btrfs_warn(inode->root->fs_info,
4653 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4654 				btrfs_ino(inode), unode->val, unode->aux);
4655 		}
4656 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4657 				btrfs_root_id(inode->root),
4658 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4659 
4660 	}
4661 	extent_changeset_release(&changeset);
4662 }
4663 
4664 void btrfs_qgroup_init_swapped_blocks(
4665 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4666 {
4667 	int i;
4668 
4669 	spin_lock_init(&swapped_blocks->lock);
4670 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4671 		swapped_blocks->blocks[i] = RB_ROOT;
4672 	swapped_blocks->swapped = false;
4673 }
4674 
4675 /*
4676  * Delete all swapped block records of @root.
4677  * Every record here means we skipped a full subtree scan for qgroup.
4678  *
4679  * Called when committing a transaction.
4680  */
4681 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4682 {
4683 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4684 	int i;
4685 
4686 	swapped_blocks = &root->swapped_blocks;
4687 
4688 	spin_lock(&swapped_blocks->lock);
4689 	if (!swapped_blocks->swapped)
4690 		goto out;
4691 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4692 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4693 		struct btrfs_qgroup_swapped_block *entry;
4694 		struct btrfs_qgroup_swapped_block *next;
4695 
4696 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4697 						     node)
4698 			kfree(entry);
4699 		swapped_blocks->blocks[i] = RB_ROOT;
4700 	}
4701 	swapped_blocks->swapped = false;
4702 out:
4703 	spin_unlock(&swapped_blocks->lock);
4704 }
4705 
4706 /*
4707  * Add subtree roots record into @subvol_root.
4708  *
4709  * @subvol_root:	tree root of the subvolume tree that gets swapped
4710  * @bg:			block group under balance
4711  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4712  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4713  *			BOTH POINTERS ARE BEFORE TREE SWAP
4714  * @last_snapshot:	last snapshot generation of the subvolume tree
4715  */
4716 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4717 		struct btrfs_root *subvol_root,
4718 		struct btrfs_block_group *bg,
4719 		struct extent_buffer *subvol_parent, int subvol_slot,
4720 		struct extent_buffer *reloc_parent, int reloc_slot,
4721 		u64 last_snapshot)
4722 {
4723 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4724 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4725 	struct btrfs_qgroup_swapped_block *block;
4726 	struct rb_node **cur;
4727 	struct rb_node *parent = NULL;
4728 	int level = btrfs_header_level(subvol_parent) - 1;
4729 	int ret = 0;
4730 
4731 	if (!btrfs_qgroup_full_accounting(fs_info))
4732 		return 0;
4733 
4734 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4735 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4736 		btrfs_err_rl(fs_info,
4737 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4738 			__func__,
4739 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4740 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4741 		return -EUCLEAN;
4742 	}
4743 
4744 	block = kmalloc(sizeof(*block), GFP_NOFS);
4745 	if (!block) {
4746 		ret = -ENOMEM;
4747 		goto out;
4748 	}
4749 
4750 	/*
4751 	 * @reloc_parent/slot is still before swap, while @block is going to
4752 	 * record the bytenr after swap, so we do the swap here.
4753 	 */
4754 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4755 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4756 							     reloc_slot);
4757 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4758 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4759 							    subvol_slot);
4760 	block->last_snapshot = last_snapshot;
4761 	block->level = level;
4762 
4763 	/*
4764 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4765 	 * and no one else can modify tree blocks, thus the qgroup numbers will
4766 	 * not change no matter the value of trace_leaf.
4767 	 */
4768 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4769 		block->trace_leaf = true;
4770 	else
4771 		block->trace_leaf = false;
4772 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4773 
4774 	/* Insert @block into @blocks */
4775 	spin_lock(&blocks->lock);
4776 	cur = &blocks->blocks[level].rb_node;
4777 	while (*cur) {
4778 		struct btrfs_qgroup_swapped_block *entry;
4779 
4780 		parent = *cur;
4781 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4782 				 node);
4783 
4784 		if (entry->subvol_bytenr < block->subvol_bytenr) {
4785 			cur = &(*cur)->rb_left;
4786 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4787 			cur = &(*cur)->rb_right;
4788 		} else {
4789 			if (entry->subvol_generation !=
4790 					block->subvol_generation ||
4791 			    entry->reloc_bytenr != block->reloc_bytenr ||
4792 			    entry->reloc_generation !=
4793 					block->reloc_generation) {
4794 				/*
4795 				 * Duplicate but mismatched entry found.
4796 				 * Shouldn't happen.
4797 				 *
4798 				 * Marking qgroup inconsistent should be enough
4799 				 * for end users.
4800 				 */
4801 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4802 				ret = -EEXIST;
4803 			}
4804 			kfree(block);
4805 			goto out_unlock;
4806 		}
4807 	}
4808 	rb_link_node(&block->node, parent, cur);
4809 	rb_insert_color(&block->node, &blocks->blocks[level]);
4810 	blocks->swapped = true;
4811 out_unlock:
4812 	spin_unlock(&blocks->lock);
4813 out:
4814 	if (ret < 0)
4815 		qgroup_mark_inconsistent(fs_info);
4816 	return ret;
4817 }
4818 
4819 /*
4820  * Check if the tree block is a subtree root, and if so do the needed
4821  * delayed subtree trace for qgroup.
4822  *
4823  * This is called during btrfs_cow_block().
4824  */
4825 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4826 					 struct btrfs_root *root,
4827 					 struct extent_buffer *subvol_eb)
4828 {
4829 	struct btrfs_fs_info *fs_info = root->fs_info;
4830 	struct btrfs_tree_parent_check check = { 0 };
4831 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4832 	struct btrfs_qgroup_swapped_block *block;
4833 	struct extent_buffer *reloc_eb = NULL;
4834 	struct rb_node *node;
4835 	bool found = false;
4836 	bool swapped = false;
4837 	int level = btrfs_header_level(subvol_eb);
4838 	int ret = 0;
4839 	int i;
4840 
4841 	if (!btrfs_qgroup_full_accounting(fs_info))
4842 		return 0;
4843 	if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4844 		return 0;
4845 
4846 	spin_lock(&blocks->lock);
4847 	if (!blocks->swapped) {
4848 		spin_unlock(&blocks->lock);
4849 		return 0;
4850 	}
4851 	node = blocks->blocks[level].rb_node;
4852 
4853 	while (node) {
4854 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4855 		if (block->subvol_bytenr < subvol_eb->start) {
4856 			node = node->rb_left;
4857 		} else if (block->subvol_bytenr > subvol_eb->start) {
4858 			node = node->rb_right;
4859 		} else {
4860 			found = true;
4861 			break;
4862 		}
4863 	}
4864 	if (!found) {
4865 		spin_unlock(&blocks->lock);
4866 		goto out;
4867 	}
4868 	/* Found one, remove it from @blocks first and update blocks->swapped */
4869 	rb_erase(&block->node, &blocks->blocks[level]);
4870 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4871 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4872 			swapped = true;
4873 			break;
4874 		}
4875 	}
4876 	blocks->swapped = swapped;
4877 	spin_unlock(&blocks->lock);
4878 
4879 	check.level = block->level;
4880 	check.transid = block->reloc_generation;
4881 	check.has_first_key = true;
4882 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4883 
4884 	/* Read out reloc subtree root */
4885 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4886 	if (IS_ERR(reloc_eb)) {
4887 		ret = PTR_ERR(reloc_eb);
4888 		reloc_eb = NULL;
4889 		goto free_out;
4890 	}
4891 	if (!extent_buffer_uptodate(reloc_eb)) {
4892 		ret = -EIO;
4893 		goto free_out;
4894 	}
4895 
4896 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4897 			block->last_snapshot, block->trace_leaf);
4898 free_out:
4899 	kfree(block);
4900 	free_extent_buffer(reloc_eb);
4901 out:
4902 	if (ret < 0) {
4903 		btrfs_err_rl(fs_info,
4904 			     "failed to account subtree at bytenr %llu: %d",
4905 			     subvol_eb->start, ret);
4906 		qgroup_mark_inconsistent(fs_info);
4907 	}
4908 	return ret;
4909 }
4910 
4911 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4912 {
4913 	struct btrfs_qgroup_extent_record *entry;
4914 	unsigned long index;
4915 
4916 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4917 		ulist_free(entry->old_roots);
4918 		kfree(entry);
4919 	}
4920 	xa_destroy(&trans->delayed_refs.dirty_extents);
4921 }
4922 
4923 void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
4924 {
4925 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4926 		return;
4927 
4928 	if (!is_fstree(root))
4929 		return;
4930 
4931 	btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
4932 }
4933 
4934 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4935 			      const struct btrfs_squota_delta *delta)
4936 {
4937 	int ret;
4938 	struct btrfs_qgroup *qgroup;
4939 	struct btrfs_qgroup *qg;
4940 	LIST_HEAD(qgroup_list);
4941 	u64 root = delta->root;
4942 	u64 num_bytes = delta->num_bytes;
4943 	const int sign = (delta->is_inc ? 1 : -1);
4944 
4945 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4946 		return 0;
4947 
4948 	if (!is_fstree(root))
4949 		return 0;
4950 
4951 	/* If the extent predates enabling quotas, don't count it. */
4952 	if (delta->generation < fs_info->qgroup_enable_gen)
4953 		return 0;
4954 
4955 	spin_lock(&fs_info->qgroup_lock);
4956 	qgroup = find_qgroup_rb(fs_info, root);
4957 	if (!qgroup) {
4958 		ret = -ENOENT;
4959 		goto out;
4960 	}
4961 
4962 	ret = 0;
4963 	qgroup_iterator_add(&qgroup_list, qgroup);
4964 	list_for_each_entry(qg, &qgroup_list, iterator) {
4965 		struct btrfs_qgroup_list *glist;
4966 
4967 		qg->excl += num_bytes * sign;
4968 		qg->rfer += num_bytes * sign;
4969 		qgroup_dirty(fs_info, qg);
4970 
4971 		list_for_each_entry(glist, &qg->groups, next_group)
4972 			qgroup_iterator_add(&qgroup_list, glist->group);
4973 	}
4974 	qgroup_iterator_clean(&qgroup_list);
4975 
4976 out:
4977 	spin_unlock(&fs_info->qgroup_lock);
4978 	return ret;
4979 }
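
/*
 * Worked example, illustrative only: with simple quotas, allocating a
 * 16K metadata block with is_inc == true adds +16K to both excl and rfer
 * of the owning qgroup and all of its ancestors; freeing that extent
 * later records the same delta with is_inc == false, i.e. sign = -1.
 * Extents whose generation predates qgroup_enable_gen are skipped.
 */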
4980