// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

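/*
 * Refcnt helpers that use a sequence number as an implicit zero point:
 * a refcnt smaller than the current seq counts as zero, so callers can
 * "reset" all refcnts for a new accounting round simply by bumping seq
 * instead of walking every qgroup.  Illustrative sketch with seq = 100:
 *
 *   btrfs_qgroup_update_old_refcnt(qg, 100, 1);   old_refcnt: 100 -> 101
 *   btrfs_qgroup_get_old_refcnt(qg, 100);         returns 101 - 100 = 1
 */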
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * Glue structure to represent the relations between qgroups.  Each
 * instance links one member qgroup to one parent (group) qgroup:
 * next_group is anchored in member->groups, next_member in
 * group->members.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

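/*
 * The ulist code can only carry a u64 aux value per node, so stash the
 * qgroup pointer there and cast it back when iterating.
 */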
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

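	/*
	 * GFP_ATOMIC because we may be running under the qgroup spinlock
	 * (see the "must be called with qgroup_lock held" rule above).
	 */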
	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
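/*
 * Quota tree key layout, as read below: status/info/limit items use
 * objectid 0 with the qgroup id in the key offset, while a relation is
 * stored twice, once as (member, RELATION, parent) and once as
 * (parent, RELATION, member); pass 2 only uses the member -> parent
 * direction (objectid <= offset).
 */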
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths, and for the third we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * btrfs_free_qgroup_config() is called both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

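/*
 * A relation item carries no payload; the key alone encodes it
 * (objectid = one qgroup, offset = the other).  Callers insert the item
 * in both directions, see btrfs_add_qgroup_relation().
 */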
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree is
		 * going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

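/*
 * Enabling quotas, in order: create the quota tree, write the status
 * item, create one qgroup item per existing subvolume (plus one for the
 * top-level tree), commit, flip the enabled bit and kick off an initial
 * rescan.
 */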
int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * 1 for the quota root item
	 * 1 for the BTRFS_QGROUP_STATUS item
	 *
	 * We would also need 2*n items (one QGROUP_INFO and one
	 * QGROUP_LIMIT item) per existing subvolume.  Those are not
	 * currently reserved, since that would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 for the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

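/*
 * Queue a qgroup for writeback of its info item.  The dirty list is
 * drained later in the transaction commit path (by btrfs_run_qgroups()
 * in current kernels; that function is outside this file excerpt).
 */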
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting case: updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case all of the child's exclusive extents are also exclusive
 * for the parent, so excl/rfer just get added or removed.
 *
 * The same goes for qgroup reservation space, which is added to or
 * removed from the parent as well; otherwise, when the child later
 * releases its reservation, the parent would underflow its reservation
 * (in the relation-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Quick path for updating a qgroup that only has exclusive refs.
 *
 * In that case, updating all parents is enough; otherwise we need to do
 * a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update; return >0 when a full rescan is needed,
 * and mark the INCONSISTENT flag.
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	int ret = 0;
	int ret2;

	tmp = ulist_alloc(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, just try to delete the
	 * stale relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* Check if such a qgroup relation exists at all */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/*
	 * Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat -1 as a special value which
	 * tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

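/*
 * Insert @record into the transaction's dirty extent rbtree, keyed by
 * bytenr.  Returns 0 if inserted, or 1 if an entry for this bytenr
 * already exists, in which case the caller is responsible for freeing
 * @record (see btrfs_qgroup_trace_extent() below).
 */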
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}

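/*
 * Post-processing for a traced extent: resolve the set of roots that
 * referenced the extent before this transaction's changes and stash it
 * in @qrecord->old_roots.  On lookup failure the fs is merely marked
 * inconsistent (for a later rescan) rather than erroring out.
 */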
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
	if (ret < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			   ret);
		return 0;
	}

	/*
	 * Here we don't need to take the lock of
	 * trans->transaction->delayed_refs, since an inserted qrecord
	 * won't be deleted; only qrecord->node may be modified (by a new
	 * qrecord insert).
	 *
	 * So modifying qrecord->old_roots is safe here.
	 */
	qrecord->old_roots = old_root;
	return 0;
}

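/*
 * Allocate a record for the [bytenr, bytenr + num_bytes) extent and
 * trace it.  Usage sketch (as in btrfs_qgroup_trace_leaf_items() below):
 *
 *   ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS);
 *
 * A zero bytenr/num_bytes or disabled quotas make this a no-op.
 */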
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen at the highest tree block, but there may be a lot
 * of tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It acts as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	extent_buffer_get(src_eb);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct btrfs_key first_key;
			struct extent_buffer *eb;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);

			eb = read_tree_block(fs_info, child_bytenr, child_gen,
					     cur_level, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans,
					dst_path->nodes[dst_level]->start,
					nodesize, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}

1901 /*
1902 * Helper function to do recursive generation-aware depth-first search, to
1903 * locate all new tree blocks in a subtree of reloc tree.
1904 *
1905 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
1906 * reloc tree
1907 * L2 NN (a)
1908 * / \
1909 * L1 OO NN (b)
1910 * / \ / \
1911 * L0 OO OO OO NN
1912 * (c) (d)
1913 * If we pass:
1914 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
1915 * @cur_level = 1
1916 * @root_level = 1
1917 *
1918 * We will iterate through tree blocks NN(b) and NN(d) and inform qgroup to
1919 * trace these tree blocks along with their counterparts in the file tree.
1920 * During the search, old tree blocks like OO(c) will be skipped, as tree
1921 * block swap won't affect them.
1922 */
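/*
 * For example, if @last_snapshot == 100, a child pointer with generation 99
 * is skipped (the block predates the snapshot and cannot have been swapped),
 * while children with generation >= 100 are read and recursed into.
 */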
1923 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
1924 struct extent_buffer *src_eb,
1925 struct btrfs_path *dst_path,
1926 int cur_level, int root_level,
1927 u64 last_snapshot, bool trace_leaf)
1928 {
1929 struct btrfs_fs_info *fs_info = trans->fs_info;
1930 struct extent_buffer *eb;
1931 bool need_cleanup = false;
1932 int ret = 0;
1933 int i;
1934
1935 /* Level sanity check */
1936 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
1937 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
1938 root_level < cur_level) {
1939 btrfs_err_rl(fs_info,
1940 "%s: bad levels, cur_level=%d root_level=%d",
1941 __func__, cur_level, root_level);
1942 return -EUCLEAN;
1943 }
1944
1945 /* Read the tree block if needed */
1946 if (dst_path->nodes[cur_level] == NULL) {
1947 struct btrfs_key first_key;
1948 int parent_slot;
1949 u64 child_gen;
1950 u64 child_bytenr;
1951
1952 /*
1953 * dst_path->nodes[root_level] must be initialized before
1954 * calling this function.
1955 */
1956 if (cur_level == root_level) {
1957 btrfs_err_rl(fs_info,
1958 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
1959 __func__, root_level, root_level, cur_level);
1960 return -EUCLEAN;
1961 }
1962
1963 /*
1964 * We need to get child blockptr/gen from parent before we can
1965 * read it.
1966 */
1967 eb = dst_path->nodes[cur_level + 1];
1968 parent_slot = dst_path->slots[cur_level + 1];
1969 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1970 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1971 btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
1972
1973 /* This node is old, no need to trace */
1974 if (child_gen < last_snapshot)
1975 goto out;
1976
1977 eb = read_tree_block(fs_info, child_bytenr, child_gen,
1978 cur_level, &first_key);
1979 if (IS_ERR(eb)) {
1980 ret = PTR_ERR(eb);
1981 goto out;
1982 } else if (!extent_buffer_uptodate(eb)) {
1983 free_extent_buffer(eb);
1984 ret = -EIO;
1985 goto out;
1986 }
1987
1988 dst_path->nodes[cur_level] = eb;
1989 dst_path->slots[cur_level] = 0;
1990
1991 btrfs_tree_read_lock(eb);
1992 btrfs_set_lock_blocking_read(eb);
1993 dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
1994 need_cleanup = true;
1995 }
1996
1997 /* Now record this tree block and its counter part for qgroups */
1998 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
1999 root_level, trace_leaf);
2000 if (ret < 0)
2001 goto cleanup;
2002
2003 eb = dst_path->nodes[cur_level];
2004
2005 if (cur_level > 0) {
2006 /* Iterate all child tree blocks */
2007 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2008 /* Skip old tree blocks as they won't be swapped */
2009 if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2010 continue;
2011 dst_path->slots[cur_level] = i;
2012
2013 /* Recursive call (at most 7 times) */
2014 ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2015 dst_path, cur_level - 1, root_level,
2016 last_snapshot, trace_leaf);
2017 if (ret < 0)
2018 goto cleanup;
2019 }
2020 }
2021
2022 cleanup:
2023 if (need_cleanup) {
2024 /* Clean up */
2025 btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2026 dst_path->locks[cur_level]);
2027 free_extent_buffer(dst_path->nodes[cur_level]);
2028 dst_path->nodes[cur_level] = NULL;
2029 dst_path->slots[cur_level] = 0;
2030 dst_path->locks[cur_level] = 0;
2031 }
2032 out:
2033 return ret;
2034 }
2035
2036 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2037 struct extent_buffer *src_eb,
2038 struct extent_buffer *dst_eb,
2039 u64 last_snapshot, bool trace_leaf)
2040 {
2041 struct btrfs_fs_info *fs_info = trans->fs_info;
2042 struct btrfs_path *dst_path = NULL;
2043 int level;
2044 int ret;
2045
2046 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2047 return 0;
2048
2049 /* Wrong parameter order */
2050 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2051 btrfs_err_rl(fs_info,
2052 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2053 btrfs_header_generation(src_eb),
2054 btrfs_header_generation(dst_eb));
2055 return -EUCLEAN;
2056 }
2057
2058 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2059 ret = -EIO;
2060 goto out;
2061 }
2062
2063 level = btrfs_header_level(dst_eb);
2064 dst_path = btrfs_alloc_path();
2065 if (!dst_path) {
2066 ret = -ENOMEM;
2067 goto out;
2068 }
2069 /* For dst_path */
2070 extent_buffer_get(dst_eb);
2071 dst_path->nodes[level] = dst_eb;
2072 dst_path->slots[level] = 0;
2073 dst_path->locks[level] = 0;
2074
2075 /* Do the generation-aware depth-first search */
2076 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2077 level, last_snapshot, trace_leaf);
2078 if (ret < 0)
2079 goto out;
2080 ret = 0;
2081
2082 out:
2083 btrfs_free_path(dst_path);
2084 if (ret < 0)
2085 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2086 return ret;
2087 }
2088
2089 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2090 struct extent_buffer *root_eb,
2091 u64 root_gen, int root_level)
2092 {
2093 struct btrfs_fs_info *fs_info = trans->fs_info;
2094 int ret = 0;
2095 int level;
2096 struct extent_buffer *eb = root_eb;
2097 struct btrfs_path *path = NULL;
2098
2099 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2100 BUG_ON(root_eb == NULL);
2101
2102 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2103 return 0;
2104
2105 if (!extent_buffer_uptodate(root_eb)) {
2106 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
2107 if (ret)
2108 goto out;
2109 }
2110
2111 if (root_level == 0) {
2112 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2113 goto out;
2114 }
2115
2116 path = btrfs_alloc_path();
2117 if (!path)
2118 return -ENOMEM;
2119
2120 /*
2121 * Walk down the tree. Missing extent blocks are filled in as
2122 * we go. Metadata is accounted every time we read a new
2123 * extent block.
2124 *
2125 * When we reach a leaf, we account for file extent items in it,
2126 * walk back up the tree (adjusting slot pointers as we go)
2127 * and restart the search process.
2128 */
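	/*
	 * For example, with root_level == 2 the walk first fills in the
	 * level 1 node at slot 0, accounts each leaf below it one at a time
	 * (restarting at walk_down after every leaf), then
	 * adjust_slots_upwards() advances to the next level 1 slot, until
	 * the slots in the root itself are exhausted.
	 */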
2129 extent_buffer_get(root_eb); /* For path */
2130 path->nodes[root_level] = root_eb;
2131 path->slots[root_level] = 0;
2132 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2133 walk_down:
2134 level = root_level;
2135 while (level >= 0) {
2136 if (path->nodes[level] == NULL) {
2137 struct btrfs_key first_key;
2138 int parent_slot;
2139 u64 child_gen;
2140 u64 child_bytenr;
2141
2142 /*
2143 * We need to get child blockptr/gen from parent before
2144 * we can read it.
2145 */
2146 eb = path->nodes[level + 1];
2147 parent_slot = path->slots[level + 1];
2148 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2149 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2150 btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2151
2152 eb = read_tree_block(fs_info, child_bytenr, child_gen,
2153 level, &first_key);
2154 if (IS_ERR(eb)) {
2155 ret = PTR_ERR(eb);
2156 goto out;
2157 } else if (!extent_buffer_uptodate(eb)) {
2158 free_extent_buffer(eb);
2159 ret = -EIO;
2160 goto out;
2161 }
2162
2163 path->nodes[level] = eb;
2164 path->slots[level] = 0;
2165
2166 btrfs_tree_read_lock(eb);
2167 btrfs_set_lock_blocking_read(eb);
2168 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
2169
2170 ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2171 fs_info->nodesize,
2172 GFP_NOFS);
2173 if (ret)
2174 goto out;
2175 }
2176
2177 if (level == 0) {
2178 ret = btrfs_qgroup_trace_leaf_items(trans,
2179 path->nodes[level]);
2180 if (ret)
2181 goto out;
2182
2183 /* Nonzero return here means we completed our search */
2184 ret = adjust_slots_upwards(path, root_level);
2185 if (ret)
2186 break;
2187
2188 /* Restart search with new slots */
2189 goto walk_down;
2190 }
2191
2192 level--;
2193 }
2194
2195 ret = 0;
2196 out:
2197 btrfs_free_path(path);
2198
2199 return ret;
2200 }
2201
2202 #define UPDATE_NEW 0
2203 #define UPDATE_OLD 1
2204 /*
2205 * Walk all of the roots that point to the bytenr and adjust their refcnts.
2206 */
2207 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2208 struct ulist *roots, struct ulist *tmp,
2209 struct ulist *qgroups, u64 seq, int update_old)
2210 {
2211 struct ulist_node *unode;
2212 struct ulist_iterator uiter;
2213 struct ulist_node *tmp_unode;
2214 struct ulist_iterator tmp_uiter;
2215 struct btrfs_qgroup *qg;
2216 int ret = 0;
2217
2218 if (!roots)
2219 return 0;
2220 ULIST_ITER_INIT(&uiter);
2221 while ((unode = ulist_next(roots, &uiter))) {
2222 qg = find_qgroup_rb(fs_info, unode->val);
2223 if (!qg)
2224 continue;
2225
2226 ulist_reinit(tmp);
2227 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2228 GFP_ATOMIC);
2229 if (ret < 0)
2230 return ret;
2231 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2232 if (ret < 0)
2233 return ret;
2234 ULIST_ITER_INIT(&tmp_uiter);
2235 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2236 struct btrfs_qgroup_list *glist;
2237
2238 qg = unode_aux_to_qgroup(tmp_unode);
2239 if (update_old)
2240 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2241 else
2242 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2243 list_for_each_entry(glist, &qg->groups, next_group) {
2244 ret = ulist_add(qgroups, glist->group->qgroupid,
2245 qgroup_to_aux(glist->group),
2246 GFP_ATOMIC);
2247 if (ret < 0)
2248 return ret;
2249 ret = ulist_add(tmp, glist->group->qgroupid,
2250 qgroup_to_aux(glist->group),
2251 GFP_ATOMIC);
2252 if (ret < 0)
2253 return ret;
2254 }
2255 }
2256 }
2257 return 0;
2258 }
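/*
 * Example for the walk above (hypothetical ids): for an extent whose roots
 * ulist is {257, 258}, where qgroup 0/257 is a member of 1/100, the walk
 * bumps the refcnt (at @seq) of 0/257, 0/258 and 1/100, and collects all
 * three into @qgroups for qgroup_update_counters().
 */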
2259
2260 /*
2261 * Update qgroup rfer/excl counters.
2262 * The rfer update is easy, the code can explain itself.
2263 *
2264 * The excl update is tricky, the update is split into 2 parts.
2265 * Part 1: Possible exclusive <-> sharing detection:
2266 *            |    A    |   !A    |
2267 * -----------+---------+---------+
2268 *      B     |    *    |    -    |
2269 * -----------+---------+---------+
2270 *     !B     |    +    |   **    |
2271 * -----------+---------+---------+
2272 *
2273 * Conditions:
2274 * A:  cur_old_roots < nr_old_roots  (not exclusive before)
2275 * !A: cur_old_roots == nr_old_roots (possibly exclusive before)
2276 * B:  cur_new_roots < nr_new_roots  (not exclusive now)
2277 * !B: cur_new_roots == nr_new_roots (possibly exclusive now)
2278 *
2279 * Results:
2280 * +: Possible sharing -> exclusive    -: Possible exclusive -> sharing
2281 * *: Definitely not changed.          **: Possibly unchanged.
2282 *
2283 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2284 *
2285 * To make the logic clear, we first use conditions A and B to split the
2286 * combination into 4 results.
2287 *
2288 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2289 * those cases only one variant may be 0.
2290 *
2291 * Lastly, check result **; since there are 2 variants that may be 0, split
2292 * it again (2x2).
2293 * But this time we don't need to consider other things, the code and logic
2294 * are easy to understand now. (An illustrative sketch follows the function.)
2295 */
2296 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2297 struct ulist *qgroups,
2298 u64 nr_old_roots,
2299 u64 nr_new_roots,
2300 u64 num_bytes, u64 seq)
2301 {
2302 struct ulist_node *unode;
2303 struct ulist_iterator uiter;
2304 struct btrfs_qgroup *qg;
2305 u64 cur_new_count, cur_old_count;
2306
2307 ULIST_ITER_INIT(&uiter);
2308 while ((unode = ulist_next(qgroups, &uiter))) {
2309 bool dirty = false;
2310
2311 qg = unode_aux_to_qgroup(unode);
2312 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2313 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2314
2315 trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2316 cur_new_count);
2317
2318 /* Rfer update part */
2319 if (cur_old_count == 0 && cur_new_count > 0) {
2320 qg->rfer += num_bytes;
2321 qg->rfer_cmpr += num_bytes;
2322 dirty = true;
2323 }
2324 if (cur_old_count > 0 && cur_new_count == 0) {
2325 qg->rfer -= num_bytes;
2326 qg->rfer_cmpr -= num_bytes;
2327 dirty = true;
2328 }
2329
2330 /* Excl update part */
2331 /* Exclusive/none -> shared case */
2332 if (cur_old_count == nr_old_roots &&
2333 cur_new_count < nr_new_roots) {
2334 /* Exclusive -> shared */
2335 if (cur_old_count != 0) {
2336 qg->excl -= num_bytes;
2337 qg->excl_cmpr -= num_bytes;
2338 dirty = true;
2339 }
2340 }
2341
2342 /* Shared -> exclusive/none case */
2343 if (cur_old_count < nr_old_roots &&
2344 cur_new_count == nr_new_roots) {
2345 /* Shared->exclusive */
2346 if (cur_new_count != 0) {
2347 qg->excl += num_bytes;
2348 qg->excl_cmpr += num_bytes;
2349 dirty = true;
2350 }
2351 }
2352
2353 /* Exclusive/none -> exclusive/none case */
2354 if (cur_old_count == nr_old_roots &&
2355 cur_new_count == nr_new_roots) {
2356 if (cur_old_count == 0) {
2357 /* None -> exclusive/none */
2358
2359 if (cur_new_count != 0) {
2360 /* None -> exclusive */
2361 qg->excl += num_bytes;
2362 qg->excl_cmpr += num_bytes;
2363 dirty = true;
2364 }
2365 /* None -> none, nothing changed */
2366 } else {
2367 /* Exclusive -> exclusive/none */
2368
2369 if (cur_new_count == 0) {
2370 /* Exclusive -> none */
2371 qg->excl -= num_bytes;
2372 qg->excl_cmpr -= num_bytes;
2373 dirty = true;
2374 }
2375 /* Exclusive -> exclusive, nothing changed */
2376 }
2377 }
2378
2379 if (dirty)
2380 qgroup_dirty(fs_info, qg);
2381 }
2382 return 0;
2383 }
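/*
 * A compact, illustrative sketch of the truth table documented above (not
 * used by the code in this file; the real logic is open coded in
 * qgroup_update_counters()): compute the signed excl delta for one qgroup
 * from its per-seq refcnts.
 */
static __maybe_unused s64 qgroup_excl_delta_sketch(u64 cur_old, u64 nr_old,
						   u64 cur_new, u64 nr_new,
						   u64 num_bytes)
{
	/* "-": possible exclusive -> sharing, only real if it was exclusive */
	if (cur_old == nr_old && cur_new < nr_new && cur_old != 0)
		return -(s64)num_bytes;

	/* "+": possible sharing -> exclusive, only real if it is exclusive now */
	if (cur_old < nr_old && cur_new == nr_new && cur_new != 0)
		return (s64)num_bytes;

	/* "**": possibly unchanged, only the none <-> exclusive cases remain */
	if (cur_old == nr_old && cur_new == nr_new) {
		if (cur_old == 0 && cur_new != 0)	/* none -> exclusive */
			return (s64)num_bytes;
		if (cur_old != 0 && cur_new == 0)	/* exclusive -> none */
			return -(s64)num_bytes;
	}

	/* "*": definitely not changed (shared before and after), or unchanged */
	return 0;
}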
2384
2385 /*
2386 * Check if @roots could potentially be a list of fs tree roots
2387 *
2388 * Return 0 if it is definitely not a ulist of fs/subvol tree roots
2389 * Return 1 if fs/subvol tree roots are possible in the list (an empty
2390 * list counts as possible too)
2391 */
2392 static int maybe_fs_roots(struct ulist *roots)
2393 {
2394 struct ulist_node *unode;
2395 struct ulist_iterator uiter;
2396
2397 /* Empty one, still possible for fs roots */
2398 if (!roots || roots->nnodes == 0)
2399 return 1;
2400
2401 ULIST_ITER_INIT(&uiter);
2402 unode = ulist_next(roots, &uiter);
2403 if (!unode)
2404 return 1;
2405
2406 /*
2407 * If it contains fs tree roots, then it must belong to fs/subvol
2408 * trees.
2409 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2410 */
2411 return is_fstree(unode->val);
2412 }
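/*
 * E.g. a ulist holding subvolume id 257 may be fs tree roots (returns 1),
 * while one holding the extent tree objectid is definitely not (returns 0).
 */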
2413
2414 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2415 u64 num_bytes, struct ulist *old_roots,
2416 struct ulist *new_roots)
2417 {
2418 struct btrfs_fs_info *fs_info = trans->fs_info;
2419 struct ulist *qgroups = NULL;
2420 struct ulist *tmp = NULL;
2421 u64 seq;
2422 u64 nr_new_roots = 0;
2423 u64 nr_old_roots = 0;
2424 int ret = 0;
2425
2426 /*
2427 * If quotas get disabled meanwhile, the resources need to be freed and
2428 * we can't just exit here.
2429 */
2430 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2431 goto out_free;
2432
2433 if (new_roots) {
2434 if (!maybe_fs_roots(new_roots))
2435 goto out_free;
2436 nr_new_roots = new_roots->nnodes;
2437 }
2438 if (old_roots) {
2439 if (!maybe_fs_roots(old_roots))
2440 goto out_free;
2441 nr_old_roots = old_roots->nnodes;
2442 }
2443
2444 /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2445 if (nr_old_roots == 0 && nr_new_roots == 0)
2446 goto out_free;
2447
2448 BUG_ON(!fs_info->quota_root);
2449
2450 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2451 num_bytes, nr_old_roots, nr_new_roots);
2452
2453 qgroups = ulist_alloc(GFP_NOFS);
2454 if (!qgroups) {
2455 ret = -ENOMEM;
2456 goto out_free;
2457 }
2458 tmp = ulist_alloc(GFP_NOFS);
2459 if (!tmp) {
2460 ret = -ENOMEM;
2461 goto out_free;
2462 }
2463
2464 mutex_lock(&fs_info->qgroup_rescan_lock);
2465 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2466 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2467 mutex_unlock(&fs_info->qgroup_rescan_lock);
2468 ret = 0;
2469 goto out_free;
2470 }
2471 }
2472 mutex_unlock(&fs_info->qgroup_rescan_lock);
2473
2474 spin_lock(&fs_info->qgroup_lock);
2475 seq = fs_info->qgroup_seq;
2476
2477 /* Update old refcnts using old_roots */
2478 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2479 UPDATE_OLD);
2480 if (ret < 0)
2481 goto out;
2482
2483 /* Update new refcnts using new_roots */
2484 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2485 UPDATE_NEW);
2486 if (ret < 0)
2487 goto out;
2488
2489 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2490 num_bytes, seq);
2491
2492 /*
2493 * Bump qgroup_seq to avoid seq overlap
2494 */
2495 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2496 out:
2497 spin_unlock(&fs_info->qgroup_lock);
2498 out_free:
2499 ulist_free(tmp);
2500 ulist_free(qgroups);
2501 ulist_free(old_roots);
2502 ulist_free(new_roots);
2503 return ret;
2504 }
2505
2506 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2507 {
2508 struct btrfs_fs_info *fs_info = trans->fs_info;
2509 struct btrfs_qgroup_extent_record *record;
2510 struct btrfs_delayed_ref_root *delayed_refs;
2511 struct ulist *new_roots = NULL;
2512 struct rb_node *node;
2513 u64 num_dirty_extents = 0;
2514 u64 qgroup_to_skip;
2515 int ret = 0;
2516
2517 delayed_refs = &trans->transaction->delayed_refs;
2518 qgroup_to_skip = delayed_refs->qgroup_to_skip;
2519 while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2520 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2521 node);
2522
2523 num_dirty_extents++;
2524 trace_btrfs_qgroup_account_extents(fs_info, record);
2525
2526 if (!ret) {
2527 /*
2528 * Old roots should have been searched when inserting the
2529 * qgroup extent record
2530 */
2531 if (WARN_ON(!record->old_roots)) {
2532 /* Search commit root to find old_roots */
2533 ret = btrfs_find_all_roots(NULL, fs_info,
2534 record->bytenr, 0,
2535 &record->old_roots, false);
2536 if (ret < 0)
2537 goto cleanup;
2538 }
2539
2540 /* Free the reserved data space */
2541 btrfs_qgroup_free_refroot(fs_info,
2542 record->data_rsv_refroot,
2543 record->data_rsv,
2544 BTRFS_QGROUP_RSV_DATA);
2545 /*
2546 * Use SEQ_LAST as time_seq to do a special search, which
2547 * doesn't lock the tree or delayed_refs and searches the
2548 * current root. It's safe inside commit_transaction().
2549 */
2550 ret = btrfs_find_all_roots(trans, fs_info,
2551 record->bytenr, SEQ_LAST, &new_roots, false);
2552 if (ret < 0)
2553 goto cleanup;
2554 if (qgroup_to_skip) {
2555 ulist_del(new_roots, qgroup_to_skip, 0);
2556 ulist_del(record->old_roots, qgroup_to_skip,
2557 0);
2558 }
2559 ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2560 record->num_bytes,
2561 record->old_roots,
2562 new_roots);
2563 record->old_roots = NULL;
2564 new_roots = NULL;
2565 }
2566 cleanup:
2567 ulist_free(record->old_roots);
2568 ulist_free(new_roots);
2569 new_roots = NULL;
2570 rb_erase(node, &delayed_refs->dirty_extent_root);
2571 kfree(record);
2572
2573 }
2574 trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2575 num_dirty_extents);
2576 return ret;
2577 }
2578
2579 /*
2580 * Called from commit_transaction(). Writes all changed qgroups to disk.
2581 */
2582 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2583 {
2584 struct btrfs_fs_info *fs_info = trans->fs_info;
2585 struct btrfs_root *quota_root = fs_info->quota_root;
2586 int ret = 0;
2587
2588 if (!quota_root)
2589 return ret;
2590
2591 spin_lock(&fs_info->qgroup_lock);
2592 while (!list_empty(&fs_info->dirty_qgroups)) {
2593 struct btrfs_qgroup *qgroup;
2594 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2595 struct btrfs_qgroup, dirty);
2596 list_del_init(&qgroup->dirty);
2597 spin_unlock(&fs_info->qgroup_lock);
2598 ret = update_qgroup_info_item(trans, qgroup);
2599 if (ret)
2600 fs_info->qgroup_flags |=
2601 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2602 ret = update_qgroup_limit_item(trans, qgroup);
2603 if (ret)
2604 fs_info->qgroup_flags |=
2605 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2606 spin_lock(&fs_info->qgroup_lock);
2607 }
2608 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2609 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2610 else
2611 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2612 spin_unlock(&fs_info->qgroup_lock);
2613
2614 ret = update_qgroup_status_item(trans);
2615 if (ret)
2616 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2617
2618 return ret;
2619 }
2620
2621 /*
2622 * Copy the accounting information between qgroups. This is necessary
2623 * when a snapshot or a subvolume is created. Throwing an error will
2624 * cause a transaction abort so we take extra care here to only error
2625 * when a readonly fs is a reasonable outcome.
2626 */
2627 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2628 u64 objectid, struct btrfs_qgroup_inherit *inherit)
2629 {
2630 int ret = 0;
2631 int i;
2632 u64 *i_qgroups;
2633 bool committing = false;
2634 struct btrfs_fs_info *fs_info = trans->fs_info;
2635 struct btrfs_root *quota_root;
2636 struct btrfs_qgroup *srcgroup;
2637 struct btrfs_qgroup *dstgroup;
2638 u32 level_size = 0;
2639 u64 nums;
2640
2641 /*
2642 * There are only two callers of this function.
2643 *
2644 * One in create_subvol() in the ioctl context, which needs to hold
2645 * the qgroup_ioctl_lock.
2646 *
2647 * The other one is in create_pending_snapshot(), where no other qgroup
2648 * code can modify the fs as they all need to either start a new trans
2649 * or hold a trans handle, thus we don't need to hold
2650 * qgroup_ioctl_lock.
2651 * This avoids a long and complex lock chain and makes lockdep happy.
2652 */
2653 spin_lock(&fs_info->trans_lock);
2654 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2655 committing = true;
2656 spin_unlock(&fs_info->trans_lock);
2657
2658 if (!committing)
2659 mutex_lock(&fs_info->qgroup_ioctl_lock);
2660 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2661 goto out;
2662
2663 quota_root = fs_info->quota_root;
2664 if (!quota_root) {
2665 ret = -EINVAL;
2666 goto out;
2667 }
2668
2669 if (inherit) {
2670 i_qgroups = (u64 *)(inherit + 1);
2671 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2672 2 * inherit->num_excl_copies;
2673 for (i = 0; i < nums; ++i) {
2674 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2675
2676 /*
2677 * Zero out invalid groups so we can ignore
2678 * them later.
2679 */
2680 if (!srcgroup ||
2681 ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2682 *i_qgroups = 0ULL;
2683
2684 ++i_qgroups;
2685 }
2686 }
2687
2688 /*
2689 * create a tracking group for the subvol itself
2690 */
2691 ret = add_qgroup_item(trans, quota_root, objectid);
2692 if (ret)
2693 goto out;
2694
2695 /*
2696 * add qgroup to all inherited groups
2697 */
2698 if (inherit) {
2699 i_qgroups = (u64 *)(inherit + 1);
2700 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2701 if (*i_qgroups == 0)
2702 continue;
2703 ret = add_qgroup_relation_item(trans, objectid,
2704 *i_qgroups);
2705 if (ret && ret != -EEXIST)
2706 goto out;
2707 ret = add_qgroup_relation_item(trans, *i_qgroups,
2708 objectid);
2709 if (ret && ret != -EEXIST)
2710 goto out;
2711 }
2712 ret = 0;
2713 }
2714
2715
2716 spin_lock(&fs_info->qgroup_lock);
2717
2718 dstgroup = add_qgroup_rb(fs_info, objectid);
2719 if (IS_ERR(dstgroup)) {
2720 ret = PTR_ERR(dstgroup);
2721 goto unlock;
2722 }
2723
2724 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2725 dstgroup->lim_flags = inherit->lim.flags;
2726 dstgroup->max_rfer = inherit->lim.max_rfer;
2727 dstgroup->max_excl = inherit->lim.max_excl;
2728 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2729 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2730
2731 ret = update_qgroup_limit_item(trans, dstgroup);
2732 if (ret) {
2733 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2734 btrfs_info(fs_info,
2735 "unable to update quota limit for %llu",
2736 dstgroup->qgroupid);
2737 goto unlock;
2738 }
2739 }
2740
2741 if (srcid) {
2742 srcgroup = find_qgroup_rb(fs_info, srcid);
2743 if (!srcgroup)
2744 goto unlock;
2745
2746 /*
2747 * We call inherit after we clone the root in order to make sure
2748 * our counts don't go crazy, so at this point the only
2749 * difference between the two roots should be the root node.
2750 */
2751 level_size = fs_info->nodesize;
2752 dstgroup->rfer = srcgroup->rfer;
2753 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2754 dstgroup->excl = level_size;
2755 dstgroup->excl_cmpr = level_size;
2756 srcgroup->excl = level_size;
2757 srcgroup->excl_cmpr = level_size;
2758
2759 /* inherit the limit info */
2760 dstgroup->lim_flags = srcgroup->lim_flags;
2761 dstgroup->max_rfer = srcgroup->max_rfer;
2762 dstgroup->max_excl = srcgroup->max_excl;
2763 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2764 dstgroup->rsv_excl = srcgroup->rsv_excl;
2765
2766 qgroup_dirty(fs_info, dstgroup);
2767 qgroup_dirty(fs_info, srcgroup);
2768 }
2769
2770 if (!inherit)
2771 goto unlock;
2772
2773 i_qgroups = (u64 *)(inherit + 1);
2774 for (i = 0; i < inherit->num_qgroups; ++i) {
2775 if (*i_qgroups) {
2776 ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2777 if (ret)
2778 goto unlock;
2779 }
2780 ++i_qgroups;
2781 }
2782
2783 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2784 struct btrfs_qgroup *src;
2785 struct btrfs_qgroup *dst;
2786
2787 if (!i_qgroups[0] || !i_qgroups[1])
2788 continue;
2789
2790 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2791 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2792
2793 if (!src || !dst) {
2794 ret = -EINVAL;
2795 goto unlock;
2796 }
2797
2798 dst->rfer = src->rfer - level_size;
2799 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2800 }
2801 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2802 struct btrfs_qgroup *src;
2803 struct btrfs_qgroup *dst;
2804
2805 if (!i_qgroups[0] || !i_qgroups[1])
2806 continue;
2807
2808 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2809 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2810
2811 if (!src || !dst) {
2812 ret = -EINVAL;
2813 goto unlock;
2814 }
2815
2816 dst->excl = src->excl + level_size;
2817 dst->excl_cmpr = src->excl_cmpr + level_size;
2818 }
2819
2820 unlock:
2821 spin_unlock(&fs_info->qgroup_lock);
2822 out:
2823 if (!committing)
2824 mutex_unlock(&fs_info->qgroup_ioctl_lock);
2825 return ret;
2826 }
2827
2828 /*
2829 * Two limits to commit transaction in advance.
2830 *
2831 * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
2832 * For SIZE, it will be in byte unit as threshold.
2833 */
2834 #define QGROUP_FREE_RATIO 32
2835 #define QGROUP_FREE_SIZE SZ_32M
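/*
 * Worked example (illustrative numbers): with max_excl = 4GiB the threshold
 * is min(4GiB / 32, 32MiB) = 32MiB, so a commit is kicked off once less than
 * 32MiB of the exclusive limit remains; with max_excl = 512MiB it is
 * min(16MiB, 32MiB) = 16MiB.
 */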
2836 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
2837 const struct btrfs_qgroup *qg, u64 num_bytes)
2838 {
2839 u64 free;
2840 u64 threshold;
2841
2842 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2843 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
2844 return false;
2845
2846 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2847 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
2848 return false;
2849
2850 /*
2851 * Even if we passed the check, it's better to check if the reservation
2852 * for meta_pertrans is pushing us near the limit.
2853 * If there is too much pertrans reservation or it's near the limit,
2854 * let's try to commit the transaction to free some, using transaction_kthread
2855 */
2856 if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
2857 BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
2858 if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
2859 free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
2860 threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
2861 QGROUP_FREE_SIZE);
2862 } else {
2863 free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
2864 threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
2865 QGROUP_FREE_SIZE);
2866 }
2867
2868 /*
2869 * Use transaction_kthread to commit the transaction, so we don't
2870 * need to worry about nested transactions or lock context.
2871 */
2872 if (free < threshold)
2873 btrfs_commit_transaction_locksafe(fs_info);
2874 }
2875
2876 return true;
2877 }
2878
2879 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2880 enum btrfs_qgroup_rsv_type type)
2881 {
2882 struct btrfs_root *quota_root;
2883 struct btrfs_qgroup *qgroup;
2884 struct btrfs_fs_info *fs_info = root->fs_info;
2885 u64 ref_root = root->root_key.objectid;
2886 int ret = 0;
2887 struct ulist_node *unode;
2888 struct ulist_iterator uiter;
2889
2890 if (!is_fstree(ref_root))
2891 return 0;
2892
2893 if (num_bytes == 0)
2894 return 0;
2895
2896 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2897 capable(CAP_SYS_RESOURCE))
2898 enforce = false;
2899
2900 spin_lock(&fs_info->qgroup_lock);
2901 quota_root = fs_info->quota_root;
2902 if (!quota_root)
2903 goto out;
2904
2905 qgroup = find_qgroup_rb(fs_info, ref_root);
2906 if (!qgroup)
2907 goto out;
2908
2909 /*
2910 * In the first step, check all affected qgroups for whether any limits
2911 * would be exceeded
2912 */
2913 ulist_reinit(fs_info->qgroup_ulist);
2914 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2915 qgroup_to_aux(qgroup), GFP_ATOMIC);
2916 if (ret < 0)
2917 goto out;
2918 ULIST_ITER_INIT(&uiter);
2919 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2920 struct btrfs_qgroup *qg;
2921 struct btrfs_qgroup_list *glist;
2922
2923 qg = unode_aux_to_qgroup(unode);
2924
2925 if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
2926 ret = -EDQUOT;
2927 goto out;
2928 }
2929
2930 list_for_each_entry(glist, &qg->groups, next_group) {
2931 ret = ulist_add(fs_info->qgroup_ulist,
2932 glist->group->qgroupid,
2933 qgroup_to_aux(glist->group), GFP_ATOMIC);
2934 if (ret < 0)
2935 goto out;
2936 }
2937 }
2938 ret = 0;
2939 /*
2940 * no limits exceeded, now record the reservation into all qgroups
2941 */
2942 ULIST_ITER_INIT(&uiter);
2943 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2944 struct btrfs_qgroup *qg;
2945
2946 qg = unode_aux_to_qgroup(unode);
2947
2948 qgroup_rsv_add(fs_info, qg, num_bytes, type);
2949 }
2950
2951 out:
2952 spin_unlock(&fs_info->qgroup_lock);
2953 return ret;
2954 }
2955
2956 /*
2957 * Free @num_bytes of reserved space with @type for a qgroup (normally a
2958 * level 0 qgroup).
2959 *
2960 * Will handle all higher level qgroups too.
2961 *
2962 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
2963 * This special case is only used for META_PERTRANS type.
2964 */
2965 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2966 u64 ref_root, u64 num_bytes,
2967 enum btrfs_qgroup_rsv_type type)
2968 {
2969 struct btrfs_root *quota_root;
2970 struct btrfs_qgroup *qgroup;
2971 struct ulist_node *unode;
2972 struct ulist_iterator uiter;
2973 int ret = 0;
2974
2975 if (!is_fstree(ref_root))
2976 return;
2977
2978 if (num_bytes == 0)
2979 return;
2980
2981 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
2982 WARN(1, "%s: Invalid type to free", __func__);
2983 return;
2984 }
2985 spin_lock(&fs_info->qgroup_lock);
2986
2987 quota_root = fs_info->quota_root;
2988 if (!quota_root)
2989 goto out;
2990
2991 qgroup = find_qgroup_rb(fs_info, ref_root);
2992 if (!qgroup)
2993 goto out;
2994
2995 if (num_bytes == (u64)-1)
2996 /*
2997 * We're freeing all pertrans rsv, get the reserved value from
2998 * the level 0 qgroup as the real num_bytes to free.
2999 */
3000 num_bytes = qgroup->rsv.values[type];
3001
3002 ulist_reinit(fs_info->qgroup_ulist);
3003 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3004 qgroup_to_aux(qgroup), GFP_ATOMIC);
3005 if (ret < 0)
3006 goto out;
3007 ULIST_ITER_INIT(&uiter);
3008 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3009 struct btrfs_qgroup *qg;
3010 struct btrfs_qgroup_list *glist;
3011
3012 qg = unode_aux_to_qgroup(unode);
3013
3014 qgroup_rsv_release(fs_info, qg, num_bytes, type);
3015
3016 list_for_each_entry(glist, &qg->groups, next_group) {
3017 ret = ulist_add(fs_info->qgroup_ulist,
3018 glist->group->qgroupid,
3019 qgroup_to_aux(glist->group), GFP_ATOMIC);
3020 if (ret < 0)
3021 goto out;
3022 }
3023 }
3024
3025 out:
3026 spin_unlock(&fs_info->qgroup_lock);
3027 }
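/*
 * E.g. (hypothetical subvolume id) freeing every reserved META_PERTRANS byte
 * of subvolume 257 at commit time:
 *
 *	btrfs_qgroup_free_refroot(fs_info, 257, (u64)-1,
 *				  BTRFS_QGROUP_RSV_META_PERTRANS);
 */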
3028
3029 /*
3030 * Check if the leaf is the last leaf, which means all node pointers
3031 * are at their last positions.
3032 */
3033 static bool is_last_leaf(struct btrfs_path *path)
3034 {
3035 int i;
3036
3037 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3038 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3039 return false;
3040 }
3041 return true;
3042 }
3043
3044 /*
3045 * returns < 0 on error, 0 when more leaves are to be scanned.
3046 * returns 1 when done.
3047 */
3048 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3049 struct btrfs_path *path)
3050 {
3051 struct btrfs_fs_info *fs_info = trans->fs_info;
3052 struct btrfs_key found;
3053 struct extent_buffer *scratch_leaf = NULL;
3054 struct ulist *roots = NULL;
3055 u64 num_bytes;
3056 bool done;
3057 int slot;
3058 int ret;
3059
3060 mutex_lock(&fs_info->qgroup_rescan_lock);
3061 ret = btrfs_search_slot_for_read(fs_info->extent_root,
3062 &fs_info->qgroup_rescan_progress,
3063 path, 1, 0);
3064
3065 btrfs_debug(fs_info,
3066 "current progress key (%llu %u %llu), search_slot ret %d",
3067 fs_info->qgroup_rescan_progress.objectid,
3068 fs_info->qgroup_rescan_progress.type,
3069 fs_info->qgroup_rescan_progress.offset, ret);
3070
3071 if (ret) {
3072 /*
3073 * The rescan is about to end, we will not be scanning any
3074 * further blocks. We cannot unset the RESCAN flag here, because
3075 * we want to commit the transaction if everything went well.
3076 * To make the live accounting work in this phase, we set our
3077 * scan progress pointer such that every real extent objectid
3078 * will be smaller.
3079 */
3080 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3081 btrfs_release_path(path);
3082 mutex_unlock(&fs_info->qgroup_rescan_lock);
3083 return ret;
3084 }
3085 done = is_last_leaf(path);
3086
3087 btrfs_item_key_to_cpu(path->nodes[0], &found,
3088 btrfs_header_nritems(path->nodes[0]) - 1);
3089 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3090
3091 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3092 if (!scratch_leaf) {
3093 ret = -ENOMEM;
3094 mutex_unlock(&fs_info->qgroup_rescan_lock);
3095 goto out;
3096 }
3097 slot = path->slots[0];
3098 btrfs_release_path(path);
3099 mutex_unlock(&fs_info->qgroup_rescan_lock);
3100
3101 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3102 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3103 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3104 found.type != BTRFS_METADATA_ITEM_KEY)
3105 continue;
3106 if (found.type == BTRFS_METADATA_ITEM_KEY)
3107 num_bytes = fs_info->nodesize;
3108 else
3109 num_bytes = found.offset;
3110
3111 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3112 &roots, false);
3113 if (ret < 0)
3114 goto out;
3115 /* For rescan, just pass old_roots as NULL */
3116 ret = btrfs_qgroup_account_extent(trans, found.objectid,
3117 num_bytes, NULL, roots);
3118 if (ret < 0)
3119 goto out;
3120 }
3121 out:
3122 if (scratch_leaf)
3123 free_extent_buffer(scratch_leaf);
3124
3125 if (done && !ret) {
3126 ret = 1;
3127 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3128 }
3129 return ret;
3130 }
3131
3132 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3133 {
3134 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3135 qgroup_rescan_work);
3136 struct btrfs_path *path;
3137 struct btrfs_trans_handle *trans = NULL;
3138 int err = -ENOMEM;
3139 int ret = 0;
3140
3141 path = btrfs_alloc_path();
3142 if (!path)
3143 goto out;
3144 /*
3145 * Rescan should only search the commit root, and any later difference
3146 * should be recorded by qgroup accounting
3147 */
3148 path->search_commit_root = 1;
3149 path->skip_locking = 1;
3150
3151 err = 0;
3152 while (!err && !btrfs_fs_closing(fs_info)) {
3153 trans = btrfs_start_transaction(fs_info->fs_root, 0);
3154 if (IS_ERR(trans)) {
3155 err = PTR_ERR(trans);
3156 break;
3157 }
3158 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3159 err = -EINTR;
3160 } else {
3161 err = qgroup_rescan_leaf(trans, path);
3162 }
3163 if (err > 0)
3164 btrfs_commit_transaction(trans);
3165 else
3166 btrfs_end_transaction(trans);
3167 }
3168
3169 out:
3170 btrfs_free_path(path);
3171
3172 mutex_lock(&fs_info->qgroup_rescan_lock);
3173 if (err > 0 &&
3174 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3175 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3176 } else if (err < 0) {
3177 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3178 }
3179 mutex_unlock(&fs_info->qgroup_rescan_lock);
3180
3181 /*
3182 * Only update the status item, since the previous part has already updated the
3183 * qgroup info.
3184 */
3185 trans = btrfs_start_transaction(fs_info->quota_root, 1);
3186 if (IS_ERR(trans)) {
3187 err = PTR_ERR(trans);
3188 trans = NULL;
3189 btrfs_err(fs_info,
3190 "fail to start transaction for status update: %d",
3191 err);
3192 }
3193
3194 mutex_lock(&fs_info->qgroup_rescan_lock);
3195 if (!btrfs_fs_closing(fs_info))
3196 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3197 if (trans) {
3198 ret = update_qgroup_status_item(trans);
3199 if (ret < 0) {
3200 err = ret;
3201 btrfs_err(fs_info, "fail to update qgroup status: %d",
3202 err);
3203 }
3204 }
3205 fs_info->qgroup_rescan_running = false;
3206 complete_all(&fs_info->qgroup_rescan_completion);
3207 mutex_unlock(&fs_info->qgroup_rescan_lock);
3208
3209 if (!trans)
3210 return;
3211
3212 btrfs_end_transaction(trans);
3213
3214 if (btrfs_fs_closing(fs_info)) {
3215 btrfs_info(fs_info, "qgroup scan paused");
3216 } else if (err >= 0) {
3217 btrfs_info(fs_info, "qgroup scan completed%s",
3218 err > 0 ? " (inconsistency flag cleared)" : "");
3219 } else {
3220 btrfs_err(fs_info, "qgroup scan failed with %d", err);
3221 }
3222 }
3223
3224 /*
3225 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3226 * memory required for the rescan context.
3227 */
3228 static int
3229 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3230 int init_flags)
3231 {
3232 int ret = 0;
3233
3234 if (!init_flags) {
3235 /* we're resuming qgroup rescan at mount time */
3236 if (!(fs_info->qgroup_flags &
3237 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3238 btrfs_warn(fs_info,
3239 "qgroup rescan init failed, qgroup rescan is not queued");
3240 ret = -EINVAL;
3241 } else if (!(fs_info->qgroup_flags &
3242 BTRFS_QGROUP_STATUS_FLAG_ON)) {
3243 btrfs_warn(fs_info,
3244 "qgroup rescan init failed, qgroup is not enabled");
3245 ret = -EINVAL;
3246 }
3247
3248 if (ret)
3249 return ret;
3250 }
3251
3252 mutex_lock(&fs_info->qgroup_rescan_lock);
3253 spin_lock(&fs_info->qgroup_lock);
3254
3255 if (init_flags) {
3256 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3257 btrfs_warn(fs_info,
3258 "qgroup rescan is already in progress");
3259 ret = -EINPROGRESS;
3260 } else if (!(fs_info->qgroup_flags &
3261 BTRFS_QGROUP_STATUS_FLAG_ON)) {
3262 btrfs_warn(fs_info,
3263 "qgroup rescan init failed, qgroup is not enabled");
3264 ret = -EINVAL;
3265 }
3266
3267 if (ret) {
3268 spin_unlock(&fs_info->qgroup_lock);
3269 mutex_unlock(&fs_info->qgroup_rescan_lock);
3270 return ret;
3271 }
3272 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3273 }
3274
3275 memset(&fs_info->qgroup_rescan_progress, 0,
3276 sizeof(fs_info->qgroup_rescan_progress));
3277 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3278 init_completion(&fs_info->qgroup_rescan_completion);
3279 fs_info->qgroup_rescan_running = true;
3280
3281 spin_unlock(&fs_info->qgroup_lock);
3282 mutex_unlock(&fs_info->qgroup_rescan_lock);
3283
3284 memset(&fs_info->qgroup_rescan_work, 0,
3285 sizeof(fs_info->qgroup_rescan_work));
3286 btrfs_init_work(&fs_info->qgroup_rescan_work,
3287 btrfs_qgroup_rescan_worker, NULL, NULL);
3288 return 0;
3289 }
3290
3291 static void
3292 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3293 {
3294 struct rb_node *n;
3295 struct btrfs_qgroup *qgroup;
3296
3297 spin_lock(&fs_info->qgroup_lock);
3298 /* clear all current qgroup tracking information */
3299 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3300 qgroup = rb_entry(n, struct btrfs_qgroup, node);
3301 qgroup->rfer = 0;
3302 qgroup->rfer_cmpr = 0;
3303 qgroup->excl = 0;
3304 qgroup->excl_cmpr = 0;
3305 qgroup_dirty(fs_info, qgroup);
3306 }
3307 spin_unlock(&fs_info->qgroup_lock);
3308 }
3309
3310 int
3311 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3312 {
3313 int ret = 0;
3314 struct btrfs_trans_handle *trans;
3315
3316 ret = qgroup_rescan_init(fs_info, 0, 1);
3317 if (ret)
3318 return ret;
3319
3320 /*
3321 * We have set the rescan_progress to 0, which means no more
3322 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3323 * However, btrfs_qgroup_account_ref may be running right after its call
3324 * to btrfs_find_all_roots, in which case it would still do the
3325 * accounting.
3326 * To solve this, we're committing the transaction, which will
3327 * ensure we run all delayed refs and only after that, we are
3328 * going to clear all tracking information for a clean start.
3329 */
3330
3331 trans = btrfs_join_transaction(fs_info->fs_root);
3332 if (IS_ERR(trans)) {
3333 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3334 return PTR_ERR(trans);
3335 }
3336 ret = btrfs_commit_transaction(trans);
3337 if (ret) {
3338 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3339 return ret;
3340 }
3341
3342 qgroup_rescan_zero_tracking(fs_info);
3343
3344 btrfs_queue_work(fs_info->qgroup_rescan_workers,
3345 &fs_info->qgroup_rescan_work);
3346
3347 return 0;
3348 }
3349
3350 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3351 bool interruptible)
3352 {
3353 int running;
3354 int ret = 0;
3355
3356 mutex_lock(&fs_info->qgroup_rescan_lock);
3357 spin_lock(&fs_info->qgroup_lock);
3358 running = fs_info->qgroup_rescan_running;
3359 spin_unlock(&fs_info->qgroup_lock);
3360 mutex_unlock(&fs_info->qgroup_rescan_lock);
3361
3362 if (!running)
3363 return 0;
3364
3365 if (interruptible)
3366 ret = wait_for_completion_interruptible(
3367 &fs_info->qgroup_rescan_completion);
3368 else
3369 wait_for_completion(&fs_info->qgroup_rescan_completion);
3370
3371 return ret;
3372 }
3373
3374 /*
3375 * This is only called from open_ctree() where we're still single threaded, thus
3376 * locking is omitted here.
3377 */
3378 void
3379 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3380 {
3381 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
3382 btrfs_queue_work(fs_info->qgroup_rescan_workers,
3383 &fs_info->qgroup_rescan_work);
3384 }
3385
3386 /*
3387 * Reserve qgroup space for range [start, start + len).
3388 *
3389 * This function will either reserve space from related qgroups or do
3390 * nothing if the range is already reserved.
3391 *
3392 * Return 0 for successful reserve
3393 * Return <0 for error (including -EDQUOT)
3394 *
3395 * NOTE: this function may sleep for memory allocation.
3396 * if btrfs_qgroup_reserve_data() is called multiple times with
3397 * the same @reserved, the caller must ensure that when an error happens
3398 * it's OK to free *ALL* reserved space.
3399 */
3400 int btrfs_qgroup_reserve_data(struct inode *inode,
3401 struct extent_changeset **reserved_ret, u64 start,
3402 u64 len)
3403 {
3404 struct btrfs_root *root = BTRFS_I(inode)->root;
3405 struct ulist_node *unode;
3406 struct ulist_iterator uiter;
3407 struct extent_changeset *reserved;
3408 u64 orig_reserved;
3409 u64 to_reserve;
3410 int ret;
3411
3412 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3413 !is_fstree(root->root_key.objectid) || len == 0)
3414 return 0;
3415
3416 /* @reserved parameter is mandatory for qgroup */
3417 if (WARN_ON(!reserved_ret))
3418 return -EINVAL;
3419 if (!*reserved_ret) {
3420 *reserved_ret = extent_changeset_alloc();
3421 if (!*reserved_ret)
3422 return -ENOMEM;
3423 }
3424 reserved = *reserved_ret;
3425 /* Record already reserved space */
3426 orig_reserved = reserved->bytes_changed;
3427 ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3428 start + len -1, EXTENT_QGROUP_RESERVED, reserved);
3429
3430 /* Newly reserved space */
3431 to_reserve = reserved->bytes_changed - orig_reserved;
3432 trace_btrfs_qgroup_reserve_data(inode, start, len,
3433 to_reserve, QGROUP_RESERVE);
3434 if (ret < 0)
3435 goto cleanup;
3436 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3437 if (ret < 0)
3438 goto cleanup;
3439
3440 return ret;
3441
3442 cleanup:
3443 /* cleanup *ALL* already reserved ranges */
3444 ULIST_ITER_INIT(&uiter);
3445 while ((unode = ulist_next(&reserved->range_changed, &uiter)))
3446 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
3447 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
3448 /* Also free data bytes of already reserved one */
3449 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
3450 orig_reserved, BTRFS_QGROUP_RSV_DATA);
3451 extent_changeset_release(reserved);
3452 return ret;
3453 }
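/*
 * A hypothetical caller pattern for btrfs_qgroup_reserve_data() (the helper
 * name dirty_the_pages() is made up): reserve before dirtying pages and, on
 * failure, free *ALL* of the reserved space as the NOTE above requires.
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;
 *	ret = dirty_the_pages(inode, start, len);
 *	if (ret < 0)
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 */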
3454
3455 /* Free ranges specified by @reserved, normally in error path */
3456 static int qgroup_free_reserved_data(struct inode *inode,
3457 struct extent_changeset *reserved, u64 start, u64 len)
3458 {
3459 struct btrfs_root *root = BTRFS_I(inode)->root;
3460 struct ulist_node *unode;
3461 struct ulist_iterator uiter;
3462 struct extent_changeset changeset;
3463 int freed = 0;
3464 int ret;
3465
3466 extent_changeset_init(&changeset);
3467 len = round_up(start + len, root->fs_info->sectorsize);
3468 start = round_down(start, root->fs_info->sectorsize);
3469
3470 ULIST_ITER_INIT(&uiter);
3471 while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3472 u64 range_start = unode->val;
3473 /* unode->aux is the inclusive end */
3474 u64 range_len = unode->aux - range_start + 1;
3475 u64 free_start;
3476 u64 free_len;
3477
3478 extent_changeset_release(&changeset);
3479
3480 /* Only free the range within [start, start + len) */
3481 if (range_start >= start + len ||
3482 range_start + range_len <= start)
3483 continue;
3484 free_start = max(range_start, start);
3485 free_len = min(start + len, range_start + range_len) -
3486 free_start;
3487 /*
3488 * TODO: Also modify reserved->ranges_reserved to reflect
3489 * the modification.
3490 *
3491 * However, as long as we free qgroup reserved space according to
3492 * EXTENT_QGROUP_RESERVED, we won't double free.
3493 * So there's no need to rush.
3494 */
3495 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
3496 free_start, free_start + free_len - 1,
3497 EXTENT_QGROUP_RESERVED, &changeset);
3498 if (ret < 0)
3499 goto out;
3500 freed += changeset.bytes_changed;
3501 }
3502 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3503 BTRFS_QGROUP_RSV_DATA);
3504 ret = freed;
3505 out:
3506 extent_changeset_release(&changeset);
3507 return ret;
3508 }
3509
3510 static int __btrfs_qgroup_release_data(struct inode *inode,
3511 struct extent_changeset *reserved, u64 start, u64 len,
3512 int free)
3513 {
3514 struct extent_changeset changeset;
3515 int trace_op = QGROUP_RELEASE;
3516 int ret;
3517
3518 if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
3519 &BTRFS_I(inode)->root->fs_info->flags))
3520 return 0;
3521
3522 /* In release case, we shouldn't have @reserved */
3523 WARN_ON(!free && reserved);
3524 if (free && reserved)
3525 return qgroup_free_reserved_data(inode, reserved, start, len);
3526 extent_changeset_init(&changeset);
3527 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
3528 start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
3529 if (ret < 0)
3530 goto out;
3531
3532 if (free)
3533 trace_op = QGROUP_FREE;
3534 trace_btrfs_qgroup_release_data(inode, start, len,
3535 changeset.bytes_changed, trace_op);
3536 if (free)
3537 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3538 BTRFS_I(inode)->root->root_key.objectid,
3539 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3540 ret = changeset.bytes_changed;
3541 out:
3542 extent_changeset_release(&changeset);
3543 return ret;
3544 }
3545
3546 /*
3547 * Free a reserved space range from io_tree and related qgroups
3548 *
3549 * Should be called when a range of pages gets invalidated before reaching
3550 * disk, or for the error cleanup case.
3551 * If @reserved is given, only the reserved range in [@start, @start + @len)
3552 * will be freed.
3553 *
3554 * For data written to disk, use btrfs_qgroup_release_data().
3555 *
3556 * NOTE: This function may sleep for memory allocation.
3557 */
3558 int btrfs_qgroup_free_data(struct inode *inode,
3559 struct extent_changeset *reserved, u64 start, u64 len)
3560 {
3561 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3562 }
3563
3564 /*
3565 * Release a reserved space range from io_tree only.
3566 *
3567 * Should be called when a range of pages gets written to disk and the
3568 * corresponding FILE_EXTENT is inserted into the corresponding root.
3569 *
3570 * Since the new qgroup accounting framework only updates qgroup numbers at
3571 * commit_transaction() time, its reserved space shouldn't be freed from
3572 * related qgroups.
3573 *
3574 * But we should release the range from the io_tree, to allow further writes
3575 * to be COWed.
3576 *
3577 * NOTE: This function may sleep for memory allocation.
3578 */
3579 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
3580 {
3581 return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3582 }
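/*
 * In short: btrfs_qgroup_free_data() clears the io_tree range *and* returns
 * the qgroup reservation (the data never reached disk), while
 * btrfs_qgroup_release_data() only clears the io_tree range and leaves the
 * reservation to be accounted at commit time (the data did reach disk).
 */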
3583
3584 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3585 enum btrfs_qgroup_rsv_type type)
3586 {
3587 if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3588 type != BTRFS_QGROUP_RSV_META_PERTRANS)
3589 return;
3590 if (num_bytes == 0)
3591 return;
3592
3593 spin_lock(&root->qgroup_meta_rsv_lock);
3594 if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3595 root->qgroup_meta_rsv_prealloc += num_bytes;
3596 else
3597 root->qgroup_meta_rsv_pertrans += num_bytes;
3598 spin_unlock(&root->qgroup_meta_rsv_lock);
3599 }
3600
3601 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3602 enum btrfs_qgroup_rsv_type type)
3603 {
3604 if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3605 type != BTRFS_QGROUP_RSV_META_PERTRANS)
3606 return 0;
3607 if (num_bytes == 0)
3608 return 0;
3609
3610 spin_lock(&root->qgroup_meta_rsv_lock);
3611 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3612 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3613 num_bytes);
3614 root->qgroup_meta_rsv_prealloc -= num_bytes;
3615 } else {
3616 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3617 num_bytes);
3618 root->qgroup_meta_rsv_pertrans -= num_bytes;
3619 }
3620 spin_unlock(&root->qgroup_meta_rsv_lock);
3621 return num_bytes;
3622 }
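/*
 * E.g. (illustrative numbers): if only 16KiB of META_PREALLOC was recorded
 * in the root but a caller asks to free 64KiB, the clamp above makes
 * sub_root_meta_rsv() return 16KiB, so only what was really reserved gets
 * freed from the qgroups.
 */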
3623
3624 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3625 enum btrfs_qgroup_rsv_type type, bool enforce)
3626 {
3627 struct btrfs_fs_info *fs_info = root->fs_info;
3628 int ret;
3629
3630 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3631 !is_fstree(root->root_key.objectid) || num_bytes == 0)
3632 return 0;
3633
3634 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3635 trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
3636 ret = qgroup_reserve(root, num_bytes, enforce, type);
3637 if (ret < 0)
3638 return ret;
3639 /*
3640 * Record what we have reserved into the root.
3641 *
3642 * This is to avoid underflow across a quota disabled->enabled cycle.
3643 * In that case, we may try to free space we haven't reserved
3644 * (since quota was disabled), so record what we reserved into the root
3645 * and ensure a later release won't underflow this number.
3646 */
3647 add_root_meta_rsv(root, num_bytes, type);
3648 return ret;
3649 }
3650
3651 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3652 {
3653 struct btrfs_fs_info *fs_info = root->fs_info;
3654
3655 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3656 !is_fstree(root->root_key.objectid))
3657 return;
3658
3659 /* TODO: Update trace point to handle such free */
3660 trace_qgroup_meta_free_all_pertrans(root);
3661 /* Special value -1 means to free all reserved space */
3662 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
3663 BTRFS_QGROUP_RSV_META_PERTRANS);
3664 }
3665
3666 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3667 enum btrfs_qgroup_rsv_type type)
3668 {
3669 struct btrfs_fs_info *fs_info = root->fs_info;
3670
3671 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3672 !is_fstree(root->root_key.objectid))
3673 return;
3674
3675 /*
3676 * Reservation for META_PREALLOC can happen before quota is enabled,
3677 * which can lead to underflow.
3678 * Ensure here that we only free what we have really reserved.
3679 */
3680 num_bytes = sub_root_meta_rsv(root, num_bytes, type);
3681 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3682 trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
3683 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
3684 num_bytes, type);
3685 }
3686
3687 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3688 int num_bytes)
3689 {
3690 struct btrfs_root *quota_root = fs_info->quota_root;
3691 struct btrfs_qgroup *qgroup;
3692 struct ulist_node *unode;
3693 struct ulist_iterator uiter;
3694 int ret = 0;
3695
3696 if (num_bytes == 0)
3697 return;
3698 if (!quota_root)
3699 return;
3700
3701 spin_lock(&fs_info->qgroup_lock);
3702 qgroup = find_qgroup_rb(fs_info, ref_root);
3703 if (!qgroup)
3704 goto out;
3705 ulist_reinit(fs_info->qgroup_ulist);
3706 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3707 qgroup_to_aux(qgroup), GFP_ATOMIC);
3708 if (ret < 0)
3709 goto out;
3710 ULIST_ITER_INIT(&uiter);
3711 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3712 struct btrfs_qgroup *qg;
3713 struct btrfs_qgroup_list *glist;
3714
3715 qg = unode_aux_to_qgroup(unode);
3716
3717 qgroup_rsv_release(fs_info, qg, num_bytes,
3718 BTRFS_QGROUP_RSV_META_PREALLOC);
3719 qgroup_rsv_add(fs_info, qg, num_bytes,
3720 BTRFS_QGROUP_RSV_META_PERTRANS);
3721 list_for_each_entry(glist, &qg->groups, next_group) {
3722 ret = ulist_add(fs_info->qgroup_ulist,
3723 glist->group->qgroupid,
3724 qgroup_to_aux(glist->group), GFP_ATOMIC);
3725 if (ret < 0)
3726 goto out;
3727 }
3728 }
3729 out:
3730 spin_unlock(&fs_info->qgroup_lock);
3731 }
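
/*
 * Illustrative sketch (not part of this file): conversion is a pure move
 * between rsv buckets, so for every qgroup in the hierarchy the total
 * reservation is unchanged.  With hypothetical values and num_bytes == 16K:
 *
 *	before:	rsv.values[META_PREALLOC] == 48K, [META_PERTRANS] == 0
 *	after:	rsv.values[META_PREALLOC] == 32K, [META_PERTRANS] == 16K
 *
 * The ulist walk applies the same move to all parent qgroups, mirroring
 * how the reservation was charged upwards in the first place.
 */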

void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}

/*
 * Check for leaked qgroup reserved space, normally at inode destroy time.
 */
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(BTRFS_I(inode)->root->fs_info,
				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
				inode->i_ino, unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
				BTRFS_I(inode)->root->root_key.objectid,
				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}
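
/*
 * Illustrative sketch (not part of this file): the leak check above fires
 * when a data reservation was never released or consumed.  The expected
 * pairing is roughly:
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	...
 *	// on error: give the range back
 *	btrfs_qgroup_free_data(inode, reserved, start, len);
 *	// on success: the delalloc/ordered extent machinery clears
 *	// EXTENT_QGROUP_RESERVED once the range reaches disk
 *
 * Any range still carrying EXTENT_QGROUP_RESERVED when the inode dies is
 * reported by btrfs_qgroup_check_reserved_leak() and force-freed.
 */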

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}
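
/*
 * Illustrative sketch (not part of this file): the postorder walk above is
 * the standard idiom for freeing a whole rbtree without rebalancing, since
 * children are always visited before their parent.  The generic shape:
 *
 *	struct my_node *entry, *next;	// hypothetical type embedding rb_node
 *
 *	rbtree_postorder_for_each_entry_safe(entry, next, &tree, node)
 *		kfree(entry);
 *	tree = RB_ROOT;
 *
 * No rb_erase() is needed because the tree is discarded wholesale.
 */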

/*
 * Add a subtree root record into @subvol_root.
 *
 * @subvol_root: tree root of the subvolume tree that gets swapped
 * @bg: block group under balance
 * @subvol_parent/slot: pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot: pointer to the subtree root in the reloc tree
 *                     BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot: last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group_cache *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation().
	 * In that case no one else can modify tree blocks, thus the qgroup
	 * numbers will not change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicate but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |=
			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
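
/*
 * Illustrative note (not part of this file): the tree is keyed by
 * subvol_bytenr, i.e. the bytenr the block will have in the subvolume tree
 * *after* the pointers are swapped, because that is the address a later
 * COW of the subvolume tree will look up.  The lookup in
 * btrfs_qgroup_trace_subtree_after_cow() below walks the tree with the
 * mirrored comparison, roughly:
 *
 *	if (block->subvol_bytenr < subvol_eb->start)
 *		node = node->rb_left;
 *	else if (block->subvol_bytenr > subvol_eb->start)
 *		node = node->rb_right;
 *	else
 *		// found the delayed record for this COWed block
 *
 * so insert and search stay consistent even though the comparison is
 * inverted relative to the more common "new < entry goes left" idiom.
 */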

/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		/* Any remaining record means there are still swapped blocks */
		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
			block->last_snapshot, block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}
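
/*
 * Illustrative sketch (not part of this file): the delayed subtree trace
 * works in two phases.  During balance, btrfs_qgroup_add_swapped_blocks()
 * records the swapped subtree roots instead of scanning them; later, when
 * the subvolume tree block is actually COWed, the record is consumed here,
 * roughly:
 *
 *	// in btrfs_cow_block() after the COW succeeded (simplified)
 *	ret = btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 *
 * Blocks that are never COWed again never pay the subtree scan at all,
 * which is the whole point of the delayed rescan.
 */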