// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

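/* Sum up the reserved space of all reservation types for @qgroup. */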
static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

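/* Add @num_bytes to the @type reservation of @qgroup and trace the update. */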
static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

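/*
 * Subtract @num_bytes from the @type reservation of @qgroup. If the
 * reservation would underflow, clamp it to 0 and (with CONFIG_BTRFS_DEBUG)
 * emit a rate-limited warning.
 */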
static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

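/*
 * Copy every per-type reservation of @src onto @dest; the release counterpart
 * below removes them again.
 */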
static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

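/*
 * The old/new refcnts below are relative to an accounting sequence number
 * @seq: the stored count is first brought up to @seq, then @mod is applied,
 * and the getters return how much was accumulated since @seq.
 */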
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

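/*
 * Remove @qgroup from the dirty list and free all relation list entries that
 * link it to its parents and members.
 */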
static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled. This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock. And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are in single-threaded paths. And for the third one, we have
 * set quota_root to be null with qgroup_lock held before, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

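/* Insert a (src, BTRFS_QGROUP_RELATION_KEY, dst) item into the quota tree. */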
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

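/*
 * Create the on-disk QGROUP_INFO and QGROUP_LIMIT items for @qgroupid, both
 * zero-initialized except for the info item's generation.
 */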
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

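/* Delete the on-disk QGROUP_INFO and QGROUP_LIMIT items of @qgroupid. */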
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

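/* Copy the in-memory limits of @qgroup into its on-disk QGROUP_LIMIT item. */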
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

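/* Copy the in-memory counters of @qgroup into its on-disk QGROUP_INFO item. */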
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

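/*
 * Write the current qgroup flags, transaction generation and rescan progress
 * into the on-disk QGROUP_STATUS item.
 */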
static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				       fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree is
		 * going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

btrfs_quota_enable(struct btrfs_fs_info * fs_info)929*4882a593Smuzhiyun int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
930*4882a593Smuzhiyun {
931*4882a593Smuzhiyun struct btrfs_root *quota_root;
932*4882a593Smuzhiyun struct btrfs_root *tree_root = fs_info->tree_root;
933*4882a593Smuzhiyun struct btrfs_path *path = NULL;
934*4882a593Smuzhiyun struct btrfs_qgroup_status_item *ptr;
935*4882a593Smuzhiyun struct extent_buffer *leaf;
936*4882a593Smuzhiyun struct btrfs_key key;
937*4882a593Smuzhiyun struct btrfs_key found_key;
938*4882a593Smuzhiyun struct btrfs_qgroup *qgroup = NULL;
939*4882a593Smuzhiyun struct btrfs_trans_handle *trans = NULL;
940*4882a593Smuzhiyun struct ulist *ulist = NULL;
941*4882a593Smuzhiyun int ret = 0;
942*4882a593Smuzhiyun int slot;
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun /*
945*4882a593Smuzhiyun * We need to have subvol_sem write locked, to prevent races between
946*4882a593Smuzhiyun * concurrent tasks trying to enable quotas, because we will unlock
947*4882a593Smuzhiyun * and relock qgroup_ioctl_lock before setting fs_info->quota_root
948*4882a593Smuzhiyun * and before setting BTRFS_FS_QUOTA_ENABLED.
949*4882a593Smuzhiyun */
950*4882a593Smuzhiyun lockdep_assert_held_write(&fs_info->subvol_sem);
951*4882a593Smuzhiyun
952*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
953*4882a593Smuzhiyun if (fs_info->quota_root)
954*4882a593Smuzhiyun goto out;
955*4882a593Smuzhiyun
956*4882a593Smuzhiyun ulist = ulist_alloc(GFP_KERNEL);
957*4882a593Smuzhiyun if (!ulist) {
958*4882a593Smuzhiyun ret = -ENOMEM;
959*4882a593Smuzhiyun goto out;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun
962*4882a593Smuzhiyun ret = btrfs_sysfs_add_qgroups(fs_info);
963*4882a593Smuzhiyun if (ret < 0)
964*4882a593Smuzhiyun goto out;
965*4882a593Smuzhiyun
966*4882a593Smuzhiyun /*
967*4882a593Smuzhiyun * Unlock qgroup_ioctl_lock before starting the transaction. This is to
968*4882a593Smuzhiyun * avoid lock acquisition inversion problems (reported by lockdep) between
969*4882a593Smuzhiyun * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
970*4882a593Smuzhiyun * start a transaction.
971*4882a593Smuzhiyun * After we started the transaction lock qgroup_ioctl_lock again and
972*4882a593Smuzhiyun * check if someone else created the quota root in the meanwhile. If so,
973*4882a593Smuzhiyun * just return success and release the transaction handle.
974*4882a593Smuzhiyun *
975*4882a593Smuzhiyun * Also we don't need to worry about someone else calling
976*4882a593Smuzhiyun * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
977*4882a593Smuzhiyun * that function returns 0 (success) when the sysfs entries already exist.
978*4882a593Smuzhiyun */
979*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun /*
982*4882a593Smuzhiyun * 1 for quota root item
983*4882a593Smuzhiyun * 1 for BTRFS_QGROUP_STATUS item
984*4882a593Smuzhiyun *
985*4882a593Smuzhiyun * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
986*4882a593Smuzhiyun * per subvolume. However those are not currently reserved since it
987*4882a593Smuzhiyun * would be a lot of overkill.
988*4882a593Smuzhiyun */
989*4882a593Smuzhiyun trans = btrfs_start_transaction(tree_root, 2);
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
992*4882a593Smuzhiyun if (IS_ERR(trans)) {
993*4882a593Smuzhiyun ret = PTR_ERR(trans);
994*4882a593Smuzhiyun trans = NULL;
995*4882a593Smuzhiyun goto out;
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun if (fs_info->quota_root)
999*4882a593Smuzhiyun goto out;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun fs_info->qgroup_ulist = ulist;
1002*4882a593Smuzhiyun ulist = NULL;
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun /*
1005*4882a593Smuzhiyun * initially create the quota tree
1006*4882a593Smuzhiyun */
1007*4882a593Smuzhiyun quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1008*4882a593Smuzhiyun if (IS_ERR(quota_root)) {
1009*4882a593Smuzhiyun ret = PTR_ERR(quota_root);
1010*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1011*4882a593Smuzhiyun goto out;
1012*4882a593Smuzhiyun }
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun path = btrfs_alloc_path();
1015*4882a593Smuzhiyun if (!path) {
1016*4882a593Smuzhiyun ret = -ENOMEM;
1017*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1018*4882a593Smuzhiyun goto out_free_root;
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun
1021*4882a593Smuzhiyun key.objectid = 0;
1022*4882a593Smuzhiyun key.type = BTRFS_QGROUP_STATUS_KEY;
1023*4882a593Smuzhiyun key.offset = 0;
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1026*4882a593Smuzhiyun sizeof(*ptr));
1027*4882a593Smuzhiyun if (ret) {
1028*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1029*4882a593Smuzhiyun goto out_free_path;
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun leaf = path->nodes[0];
1033*4882a593Smuzhiyun ptr = btrfs_item_ptr(leaf, path->slots[0],
1034*4882a593Smuzhiyun struct btrfs_qgroup_status_item);
1035*4882a593Smuzhiyun btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1036*4882a593Smuzhiyun btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1037*4882a593Smuzhiyun fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
1038*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1039*4882a593Smuzhiyun btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
1040*4882a593Smuzhiyun btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun btrfs_mark_buffer_dirty(leaf);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun key.objectid = 0;
1045*4882a593Smuzhiyun key.type = BTRFS_ROOT_REF_KEY;
1046*4882a593Smuzhiyun key.offset = 0;
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun btrfs_release_path(path);
1049*4882a593Smuzhiyun ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1050*4882a593Smuzhiyun if (ret > 0)
1051*4882a593Smuzhiyun goto out_add_root;
1052*4882a593Smuzhiyun if (ret < 0) {
1053*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1054*4882a593Smuzhiyun goto out_free_path;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun while (1) {
1058*4882a593Smuzhiyun slot = path->slots[0];
1059*4882a593Smuzhiyun leaf = path->nodes[0];
1060*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, slot);
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun if (found_key.type == BTRFS_ROOT_REF_KEY) {
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun /* Release locks on tree_root before we access quota_root */
1065*4882a593Smuzhiyun btrfs_release_path(path);
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun ret = add_qgroup_item(trans, quota_root,
1068*4882a593Smuzhiyun found_key.offset);
1069*4882a593Smuzhiyun if (ret) {
1070*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1071*4882a593Smuzhiyun goto out_free_path;
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun qgroup = add_qgroup_rb(fs_info, found_key.offset);
1075*4882a593Smuzhiyun if (IS_ERR(qgroup)) {
1076*4882a593Smuzhiyun ret = PTR_ERR(qgroup);
1077*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1078*4882a593Smuzhiyun goto out_free_path;
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1081*4882a593Smuzhiyun if (ret < 0) {
1082*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1083*4882a593Smuzhiyun goto out_free_path;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun ret = btrfs_search_slot_for_read(tree_root, &found_key,
1086*4882a593Smuzhiyun path, 1, 0);
1087*4882a593Smuzhiyun if (ret < 0) {
1088*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1089*4882a593Smuzhiyun goto out_free_path;
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun if (ret > 0) {
1092*4882a593Smuzhiyun /*
1093*4882a593Smuzhiyun * Shouldn't happen, but in case it does we
1094*4882a593Smuzhiyun * don't need to do the btrfs_next_item, just
1095*4882a593Smuzhiyun * continue.
1096*4882a593Smuzhiyun */
1097*4882a593Smuzhiyun continue;
1098*4882a593Smuzhiyun }
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun ret = btrfs_next_item(tree_root, path);
1101*4882a593Smuzhiyun if (ret < 0) {
1102*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1103*4882a593Smuzhiyun goto out_free_path;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun if (ret)
1106*4882a593Smuzhiyun break;
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun out_add_root:
1110*4882a593Smuzhiyun btrfs_release_path(path);
1111*4882a593Smuzhiyun ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1112*4882a593Smuzhiyun if (ret) {
1113*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1114*4882a593Smuzhiyun goto out_free_path;
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
1118*4882a593Smuzhiyun if (IS_ERR(qgroup)) {
1119*4882a593Smuzhiyun ret = PTR_ERR(qgroup);
1120*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1121*4882a593Smuzhiyun goto out_free_path;
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1124*4882a593Smuzhiyun if (ret < 0) {
1125*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1126*4882a593Smuzhiyun goto out_free_path;
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1130*4882a593Smuzhiyun /*
1131*4882a593Smuzhiyun * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1132*4882a593Smuzhiyun * a deadlock with tasks concurrently doing other qgroup operations, such
1133*4882a593Smuzhiyun * adding/removing qgroups or adding/deleting qgroup relations for example,
1134*4882a593Smuzhiyun * because all qgroup operations first start or join a transaction and then
1135*4882a593Smuzhiyun * lock the qgroup_ioctl_lock mutex.
1136*4882a593Smuzhiyun * We are safe from a concurrent task trying to enable quotas, by calling
1137*4882a593Smuzhiyun * this function, since we are serialized by fs_info->subvol_sem.
1138*4882a593Smuzhiyun */
1139*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
1140*4882a593Smuzhiyun trans = NULL;
1141*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1142*4882a593Smuzhiyun if (ret)
1143*4882a593Smuzhiyun goto out_free_path;
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun /*
1146*4882a593Smuzhiyun * Set quota enabled flag after committing the transaction, to avoid
1147*4882a593Smuzhiyun * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1148*4882a593Smuzhiyun * creation.
1149*4882a593Smuzhiyun */
1150*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1151*4882a593Smuzhiyun fs_info->quota_root = quota_root;
1152*4882a593Smuzhiyun set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1153*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun ret = qgroup_rescan_init(fs_info, 0, 1);
1156*4882a593Smuzhiyun if (!ret) {
1157*4882a593Smuzhiyun qgroup_rescan_zero_tracking(fs_info);
1158*4882a593Smuzhiyun fs_info->qgroup_rescan_running = true;
1159*4882a593Smuzhiyun btrfs_queue_work(fs_info->qgroup_rescan_workers,
1160*4882a593Smuzhiyun &fs_info->qgroup_rescan_work);
1161*4882a593Smuzhiyun } else {
1162*4882a593Smuzhiyun /*
1163*4882a593Smuzhiyun * We have set both BTRFS_FS_QUOTA_ENABLED and
1164*4882a593Smuzhiyun * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1165*4882a593Smuzhiyun * -EINPROGRESS. That can happen because someone started the
1166*4882a593Smuzhiyun * rescan worker by calling quota rescan ioctl before we
1167*4882a593Smuzhiyun * attempted to initialize the rescan worker. Failure due to
1168*4882a593Smuzhiyun * quotas disabled in the meanwhile is not possible, because
1169*4882a593Smuzhiyun * we are holding a write lock on fs_info->subvol_sem, which
1170*4882a593Smuzhiyun * is also acquired when disabling quotas.
1171*4882a593Smuzhiyun * Ignore such error, and any other error would need to undo
1172*4882a593Smuzhiyun * everything we did in the transaction we just committed.
1173*4882a593Smuzhiyun */
1174*4882a593Smuzhiyun ASSERT(ret == -EINPROGRESS);
1175*4882a593Smuzhiyun ret = 0;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun out_free_path:
1179*4882a593Smuzhiyun btrfs_free_path(path);
1180*4882a593Smuzhiyun out_free_root:
1181*4882a593Smuzhiyun if (ret)
1182*4882a593Smuzhiyun btrfs_put_root(quota_root);
1183*4882a593Smuzhiyun out:
1184*4882a593Smuzhiyun if (ret) {
1185*4882a593Smuzhiyun ulist_free(fs_info->qgroup_ulist);
1186*4882a593Smuzhiyun fs_info->qgroup_ulist = NULL;
1187*4882a593Smuzhiyun btrfs_sysfs_del_qgroups(fs_info);
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1190*4882a593Smuzhiyun if (ret && trans)
1191*4882a593Smuzhiyun btrfs_end_transaction(trans);
1192*4882a593Smuzhiyun else if (trans)
1193*4882a593Smuzhiyun ret = btrfs_end_transaction(trans);
1194*4882a593Smuzhiyun ulist_free(ulist);
1195*4882a593Smuzhiyun return ret;
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun
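/*
 * Tear down qgroup support: clear the quota-enabled bit, wait for any
 * running rescan worker to finish, delete the quota tree together with its
 * root item and free the in-memory qgroup configuration.  The caller must
 * hold fs_info->subvol_sem for writing.
 */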
1198*4882a593Smuzhiyun int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun struct btrfs_root *quota_root;
1201*4882a593Smuzhiyun struct btrfs_trans_handle *trans = NULL;
1202*4882a593Smuzhiyun int ret = 0;
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun /*
1205*4882a593Smuzhiyun * We need to have subvol_sem write locked, to prevent races between
1206*4882a593Smuzhiyun * concurrent tasks trying to disable quotas, because we will unlock
1207*4882a593Smuzhiyun * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
1208*4882a593Smuzhiyun */
1209*4882a593Smuzhiyun lockdep_assert_held_write(&fs_info->subvol_sem);
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1212*4882a593Smuzhiyun if (!fs_info->quota_root)
1213*4882a593Smuzhiyun goto out;
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun /*
1216*4882a593Smuzhiyun * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1217*4882a593Smuzhiyun * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1218*4882a593Smuzhiyun * to lock that mutex while holding a transaction handle and the rescan
1219*4882a593Smuzhiyun * worker needs to commit a transaction.
1220*4882a593Smuzhiyun */
1221*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun /*
1224*4882a593Smuzhiyun * Request the qgroup rescan worker to complete and wait for it. This wait
1225*4882a593Smuzhiyun * must be done before starting the transaction for quota disable, since we
1226*4882a593Smuzhiyun * may otherwise deadlock with a transaction used by the rescan worker.
1227*4882a593Smuzhiyun */
1228*4882a593Smuzhiyun clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1229*4882a593Smuzhiyun btrfs_qgroup_wait_for_completion(fs_info, false);
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun /*
1232*4882a593Smuzhiyun * 1 For the root item
1233*4882a593Smuzhiyun *
1234*4882a593Smuzhiyun * We should also reserve enough items for the quota tree deletion in
1235*4882a593Smuzhiyun * btrfs_clean_quota_tree but this is not done.
1236*4882a593Smuzhiyun *
1237*4882a593Smuzhiyun * Also, we must always start a transaction without holding the mutex
1238*4882a593Smuzhiyun * qgroup_ioctl_lock, see btrfs_quota_enable().
1239*4882a593Smuzhiyun */
1240*4882a593Smuzhiyun trans = btrfs_start_transaction(fs_info->tree_root, 1);
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1243*4882a593Smuzhiyun if (IS_ERR(trans)) {
1244*4882a593Smuzhiyun ret = PTR_ERR(trans);
1245*4882a593Smuzhiyun trans = NULL;
1246*4882a593Smuzhiyun set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1247*4882a593Smuzhiyun goto out;
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun if (!fs_info->quota_root)
1251*4882a593Smuzhiyun goto out;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1254*4882a593Smuzhiyun quota_root = fs_info->quota_root;
1255*4882a593Smuzhiyun fs_info->quota_root = NULL;
1256*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1257*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun btrfs_free_qgroup_config(fs_info);
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun ret = btrfs_clean_quota_tree(trans, quota_root);
1262*4882a593Smuzhiyun if (ret) {
1263*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1264*4882a593Smuzhiyun goto out;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun ret = btrfs_del_root(trans, "a_root->root_key);
1268*4882a593Smuzhiyun if (ret) {
1269*4882a593Smuzhiyun btrfs_abort_transaction(trans, ret);
1270*4882a593Smuzhiyun goto out;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun list_del("a_root->dirty_list);
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun btrfs_tree_lock(quota_root->node);
1276*4882a593Smuzhiyun btrfs_clean_tree_block(quota_root->node);
1277*4882a593Smuzhiyun btrfs_tree_unlock(quota_root->node);
1278*4882a593Smuzhiyun btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun btrfs_put_root(quota_root);
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun out:
1283*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1284*4882a593Smuzhiyun if (ret && trans)
1285*4882a593Smuzhiyun btrfs_end_transaction(trans);
1286*4882a593Smuzhiyun else if (trans)
1287*4882a593Smuzhiyun ret = btrfs_end_transaction(trans);
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun return ret;
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1293*4882a593Smuzhiyun struct btrfs_qgroup *qgroup)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun if (list_empty(&qgroup->dirty))
1296*4882a593Smuzhiyun list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun /*
1300*4882a593Smuzhiyun * The easy accounting: we are updating a qgroup relationship whose child
1301*4882a593Smuzhiyun * qgroup only has exclusive extents.
1302*4882a593Smuzhiyun *
1303*4882a593Smuzhiyun * In this case, all exclusive extents will also be exclusive for the parent,
1304*4882a593Smuzhiyun * so excl/rfer just get added/removed.
1305*4882a593Smuzhiyun *
1306*4882a593Smuzhiyun * The same applies to qgroup reservation space, which must also be added to
1307*4882a593Smuzhiyun * or removed from the parent.  Otherwise, when the child later releases its
1308*4882a593Smuzhiyun * reservation, the parent would underflow its own reservation (in the
1309*4882a593Smuzhiyun * relationship adding case).
1310*4882a593Smuzhiyun *
1311*4882a593Smuzhiyun * Caller should hold fs_info->qgroup_lock.
1312*4882a593Smuzhiyun */
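/*
 * Illustrative example (hypothetical numbers): if the child qgroup @src has
 * excl == 1M and is attached to a parent qgroup with sign == +1, the
 * parent's rfer, rfer_cmpr, excl and excl_cmpr all grow by 1M and @src's
 * reserved space is added to the parent and to every higher-level ancestor.
 * Detaching the child (sign == -1) subtracts the same amounts again.
 */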
1313*4882a593Smuzhiyun static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1314*4882a593Smuzhiyun struct ulist *tmp, u64 ref_root,
1315*4882a593Smuzhiyun struct btrfs_qgroup *src, int sign)
1316*4882a593Smuzhiyun {
1317*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
1318*4882a593Smuzhiyun struct btrfs_qgroup_list *glist;
1319*4882a593Smuzhiyun struct ulist_node *unode;
1320*4882a593Smuzhiyun struct ulist_iterator uiter;
1321*4882a593Smuzhiyun u64 num_bytes = src->excl;
1322*4882a593Smuzhiyun int ret = 0;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, ref_root);
1325*4882a593Smuzhiyun if (!qgroup)
1326*4882a593Smuzhiyun goto out;
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun qgroup->rfer += sign * num_bytes;
1329*4882a593Smuzhiyun qgroup->rfer_cmpr += sign * num_bytes;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1332*4882a593Smuzhiyun qgroup->excl += sign * num_bytes;
1333*4882a593Smuzhiyun qgroup->excl_cmpr += sign * num_bytes;
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun if (sign > 0)
1336*4882a593Smuzhiyun qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1337*4882a593Smuzhiyun else
1338*4882a593Smuzhiyun qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun qgroup_dirty(fs_info, qgroup);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun /* Get all of the parent groups that contain this qgroup */
1343*4882a593Smuzhiyun list_for_each_entry(glist, &qgroup->groups, next_group) {
1344*4882a593Smuzhiyun ret = ulist_add(tmp, glist->group->qgroupid,
1345*4882a593Smuzhiyun qgroup_to_aux(glist->group), GFP_ATOMIC);
1346*4882a593Smuzhiyun if (ret < 0)
1347*4882a593Smuzhiyun goto out;
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun /* Iterate all of the parents and adjust their reference counts */
1351*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
1352*4882a593Smuzhiyun while ((unode = ulist_next(tmp, &uiter))) {
1353*4882a593Smuzhiyun qgroup = unode_aux_to_qgroup(unode);
1354*4882a593Smuzhiyun qgroup->rfer += sign * num_bytes;
1355*4882a593Smuzhiyun qgroup->rfer_cmpr += sign * num_bytes;
1356*4882a593Smuzhiyun WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1357*4882a593Smuzhiyun qgroup->excl += sign * num_bytes;
1358*4882a593Smuzhiyun if (sign > 0)
1359*4882a593Smuzhiyun qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1360*4882a593Smuzhiyun else
1361*4882a593Smuzhiyun qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1362*4882a593Smuzhiyun qgroup->excl_cmpr += sign * num_bytes;
1363*4882a593Smuzhiyun qgroup_dirty(fs_info, qgroup);
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun /* Add any parents of the parents */
1366*4882a593Smuzhiyun list_for_each_entry(glist, &qgroup->groups, next_group) {
1367*4882a593Smuzhiyun ret = ulist_add(tmp, glist->group->qgroupid,
1368*4882a593Smuzhiyun qgroup_to_aux(glist->group), GFP_ATOMIC);
1369*4882a593Smuzhiyun if (ret < 0)
1370*4882a593Smuzhiyun goto out;
1371*4882a593Smuzhiyun }
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun ret = 0;
1374*4882a593Smuzhiyun out:
1375*4882a593Smuzhiyun return ret;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun /*
1380*4882a593Smuzhiyun * Quick path for updating a qgroup that has only exclusive refs.
1381*4882a593Smuzhiyun *
1382*4882a593Smuzhiyun * In that case, updating all of its parents is enough.
1383*4882a593Smuzhiyun * Otherwise we need to do a full rescan.
1384*4882a593Smuzhiyun * Caller should also hold fs_info->qgroup_lock.
1385*4882a593Smuzhiyun *
1386*4882a593Smuzhiyun * Return 0 for a quick update, return >0 if a full rescan is needed
1387*4882a593Smuzhiyun * (the INCONSISTENT flag is set in that case).
1388*4882a593Smuzhiyun * Return <0 for other errors.
1389*4882a593Smuzhiyun */
1390*4882a593Smuzhiyun static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1391*4882a593Smuzhiyun struct ulist *tmp, u64 src, u64 dst,
1392*4882a593Smuzhiyun int sign)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
1395*4882a593Smuzhiyun int ret = 1;
1396*4882a593Smuzhiyun int err = 0;
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, src);
1399*4882a593Smuzhiyun if (!qgroup)
1400*4882a593Smuzhiyun goto out;
1401*4882a593Smuzhiyun if (qgroup->excl == qgroup->rfer) {
1402*4882a593Smuzhiyun ret = 0;
1403*4882a593Smuzhiyun err = __qgroup_excl_accounting(fs_info, tmp, dst,
1404*4882a593Smuzhiyun qgroup, sign);
1405*4882a593Smuzhiyun if (err < 0) {
1406*4882a593Smuzhiyun ret = err;
1407*4882a593Smuzhiyun goto out;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun out:
1411*4882a593Smuzhiyun if (ret)
1412*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1413*4882a593Smuzhiyun return ret;
1414*4882a593Smuzhiyun }
1415*4882a593Smuzhiyun
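/*
 * Make qgroup @dst a parent of qgroup @src.  @dst must be of a higher level
 * than @src.  The relation is stored in both directions in the quota tree
 * and mirrored in the in-memory rbtree; accounting is then fixed up via the
 * quick path when possible, otherwise the qgroups are marked inconsistent.
 */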
1416*4882a593Smuzhiyun int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1417*4882a593Smuzhiyun u64 dst)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1420*4882a593Smuzhiyun struct btrfs_qgroup *parent;
1421*4882a593Smuzhiyun struct btrfs_qgroup *member;
1422*4882a593Smuzhiyun struct btrfs_qgroup_list *list;
1423*4882a593Smuzhiyun struct ulist *tmp;
1424*4882a593Smuzhiyun unsigned int nofs_flag;
1425*4882a593Smuzhiyun int ret = 0;
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun /* Check the level of src and dst first */
1428*4882a593Smuzhiyun if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1429*4882a593Smuzhiyun return -EINVAL;
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun /* We hold a transaction handle open, must do a NOFS allocation. */
1432*4882a593Smuzhiyun nofs_flag = memalloc_nofs_save();
1433*4882a593Smuzhiyun tmp = ulist_alloc(GFP_KERNEL);
1434*4882a593Smuzhiyun memalloc_nofs_restore(nofs_flag);
1435*4882a593Smuzhiyun if (!tmp)
1436*4882a593Smuzhiyun return -ENOMEM;
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1439*4882a593Smuzhiyun if (!fs_info->quota_root) {
1440*4882a593Smuzhiyun ret = -ENOTCONN;
1441*4882a593Smuzhiyun goto out;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun member = find_qgroup_rb(fs_info, src);
1444*4882a593Smuzhiyun parent = find_qgroup_rb(fs_info, dst);
1445*4882a593Smuzhiyun if (!member || !parent) {
1446*4882a593Smuzhiyun ret = -EINVAL;
1447*4882a593Smuzhiyun goto out;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun /* Check if such a qgroup relation already exists */
1451*4882a593Smuzhiyun list_for_each_entry(list, &member->groups, next_group) {
1452*4882a593Smuzhiyun if (list->group == parent) {
1453*4882a593Smuzhiyun ret = -EEXIST;
1454*4882a593Smuzhiyun goto out;
1455*4882a593Smuzhiyun }
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun ret = add_qgroup_relation_item(trans, src, dst);
1459*4882a593Smuzhiyun if (ret)
1460*4882a593Smuzhiyun goto out;
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun ret = add_qgroup_relation_item(trans, dst, src);
1463*4882a593Smuzhiyun if (ret) {
1464*4882a593Smuzhiyun del_qgroup_relation_item(trans, src, dst);
1465*4882a593Smuzhiyun goto out;
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1469*4882a593Smuzhiyun ret = add_relation_rb(fs_info, src, dst);
1470*4882a593Smuzhiyun if (ret < 0) {
1471*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1472*4882a593Smuzhiyun goto out;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
1475*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1476*4882a593Smuzhiyun out:
1477*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1478*4882a593Smuzhiyun ulist_free(tmp);
1479*4882a593Smuzhiyun return ret;
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1483*4882a593Smuzhiyun u64 dst)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1486*4882a593Smuzhiyun struct btrfs_qgroup *parent;
1487*4882a593Smuzhiyun struct btrfs_qgroup *member;
1488*4882a593Smuzhiyun struct btrfs_qgroup_list *list;
1489*4882a593Smuzhiyun struct ulist *tmp;
1490*4882a593Smuzhiyun bool found = false;
1491*4882a593Smuzhiyun unsigned int nofs_flag;
1492*4882a593Smuzhiyun int ret = 0;
1493*4882a593Smuzhiyun int ret2;
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun /* We hold a transaction handle open, must do a NOFS allocation. */
1496*4882a593Smuzhiyun nofs_flag = memalloc_nofs_save();
1497*4882a593Smuzhiyun tmp = ulist_alloc(GFP_KERNEL);
1498*4882a593Smuzhiyun memalloc_nofs_restore(nofs_flag);
1499*4882a593Smuzhiyun if (!tmp)
1500*4882a593Smuzhiyun return -ENOMEM;
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun if (!fs_info->quota_root) {
1503*4882a593Smuzhiyun ret = -ENOTCONN;
1504*4882a593Smuzhiyun goto out;
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun member = find_qgroup_rb(fs_info, src);
1508*4882a593Smuzhiyun parent = find_qgroup_rb(fs_info, dst);
1509*4882a593Smuzhiyun /*
1510*4882a593Smuzhiyun * If the parent/member pair doesn't exist, just try to delete the stale
1511*4882a593Smuzhiyun * relation items.
1512*4882a593Smuzhiyun */
1513*4882a593Smuzhiyun if (!member || !parent)
1514*4882a593Smuzhiyun goto delete_item;
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun /* Check if such a qgroup relation already exists */
1517*4882a593Smuzhiyun list_for_each_entry(list, &member->groups, next_group) {
1518*4882a593Smuzhiyun if (list->group == parent) {
1519*4882a593Smuzhiyun found = true;
1520*4882a593Smuzhiyun break;
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun delete_item:
1525*4882a593Smuzhiyun ret = del_qgroup_relation_item(trans, src, dst);
1526*4882a593Smuzhiyun if (ret < 0 && ret != -ENOENT)
1527*4882a593Smuzhiyun goto out;
1528*4882a593Smuzhiyun ret2 = del_qgroup_relation_item(trans, dst, src);
1529*4882a593Smuzhiyun if (ret2 < 0 && ret2 != -ENOENT)
1530*4882a593Smuzhiyun goto out;
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun /* At least one deletion succeeded, return 0 */
1533*4882a593Smuzhiyun if (!ret || !ret2)
1534*4882a593Smuzhiyun ret = 0;
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun if (found) {
1537*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1538*4882a593Smuzhiyun del_relation_rb(fs_info, src, dst);
1539*4882a593Smuzhiyun ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1540*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun out:
1543*4882a593Smuzhiyun ulist_free(tmp);
1544*4882a593Smuzhiyun return ret;
1545*4882a593Smuzhiyun }
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1548*4882a593Smuzhiyun u64 dst)
1549*4882a593Smuzhiyun {
1550*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1551*4882a593Smuzhiyun int ret = 0;
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1554*4882a593Smuzhiyun ret = __del_qgroup_relation(trans, src, dst);
1555*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun return ret;
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
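/*
 * Create a new qgroup with the given ID: add the qgroup item to the quota
 * tree, insert the qgroup into the in-memory rbtree and register it in
 * sysfs.  Returns -EEXIST if the qgroup already exists and -ENOTCONN if
 * quotas are not enabled.
 */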
1560*4882a593Smuzhiyun int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1563*4882a593Smuzhiyun struct btrfs_root *quota_root;
1564*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
1565*4882a593Smuzhiyun int ret = 0;
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1568*4882a593Smuzhiyun if (!fs_info->quota_root) {
1569*4882a593Smuzhiyun ret = -ENOTCONN;
1570*4882a593Smuzhiyun goto out;
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun quota_root = fs_info->quota_root;
1573*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, qgroupid);
1574*4882a593Smuzhiyun if (qgroup) {
1575*4882a593Smuzhiyun ret = -EEXIST;
1576*4882a593Smuzhiyun goto out;
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun ret = add_qgroup_item(trans, quota_root, qgroupid);
1580*4882a593Smuzhiyun if (ret)
1581*4882a593Smuzhiyun goto out;
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1584*4882a593Smuzhiyun qgroup = add_qgroup_rb(fs_info, qgroupid);
1585*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun if (IS_ERR(qgroup)) {
1588*4882a593Smuzhiyun ret = PTR_ERR(qgroup);
1589*4882a593Smuzhiyun goto out;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1592*4882a593Smuzhiyun out:
1593*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1594*4882a593Smuzhiyun return ret;
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
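/*
 * Remove the qgroup with the given ID.  Fails with -EBUSY while the qgroup
 * still has member (child) qgroups.  Otherwise the qgroup item and any
 * relations to its parent qgroups are deleted, and the in-memory and sysfs
 * entries are removed.
 */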
1597*4882a593Smuzhiyun int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1598*4882a593Smuzhiyun {
1599*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1600*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
1601*4882a593Smuzhiyun struct btrfs_qgroup_list *list;
1602*4882a593Smuzhiyun int ret = 0;
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1605*4882a593Smuzhiyun if (!fs_info->quota_root) {
1606*4882a593Smuzhiyun ret = -ENOTCONN;
1607*4882a593Smuzhiyun goto out;
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, qgroupid);
1611*4882a593Smuzhiyun if (!qgroup) {
1612*4882a593Smuzhiyun ret = -ENOENT;
1613*4882a593Smuzhiyun goto out;
1614*4882a593Smuzhiyun }
1615*4882a593Smuzhiyun
1616*4882a593Smuzhiyun /* Check if there are no children of this qgroup */
1617*4882a593Smuzhiyun if (!list_empty(&qgroup->members)) {
1618*4882a593Smuzhiyun ret = -EBUSY;
1619*4882a593Smuzhiyun goto out;
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun ret = del_qgroup_item(trans, qgroupid);
1623*4882a593Smuzhiyun if (ret && ret != -ENOENT)
1624*4882a593Smuzhiyun goto out;
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun while (!list_empty(&qgroup->groups)) {
1627*4882a593Smuzhiyun list = list_first_entry(&qgroup->groups,
1628*4882a593Smuzhiyun struct btrfs_qgroup_list, next_group);
1629*4882a593Smuzhiyun ret = __del_qgroup_relation(trans, qgroupid,
1630*4882a593Smuzhiyun list->group->qgroupid);
1631*4882a593Smuzhiyun if (ret)
1632*4882a593Smuzhiyun goto out;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1636*4882a593Smuzhiyun del_qgroup_rb(fs_info, qgroupid);
1637*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun /*
1640*4882a593Smuzhiyun * Remove the qgroup from sysfs now without holding the qgroup_lock
1641*4882a593Smuzhiyun * spinlock, since the sysfs_remove_group() function needs to take
1642*4882a593Smuzhiyun * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1643*4882a593Smuzhiyun */
1644*4882a593Smuzhiyun btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1645*4882a593Smuzhiyun kfree(qgroup);
1646*4882a593Smuzhiyun out:
1647*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1648*4882a593Smuzhiyun return ret;
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun
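/*
 * Apply the limits in @limit to the qgroup @qgroupid and persist them in the
 * quota tree.  A limit value of -1 clears that limit.
 *
 * Hypothetical caller sketch (illustrative only, not a call site in this
 * file) for clearing the max referenced limit:
 *
 *	struct btrfs_qgroup_limit lim = {
 *		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *		.max_rfer = (u64)-1,
 *	};
 *	ret = btrfs_limit_qgroup(trans, qgroupid, &lim);
 */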
1651*4882a593Smuzhiyun int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1652*4882a593Smuzhiyun struct btrfs_qgroup_limit *limit)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1655*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
1656*4882a593Smuzhiyun int ret = 0;
1657*4882a593Smuzhiyun /* Sometimes we want to clear the limit on this qgroup.
1658*4882a593Smuzhiyun * To meet this requirement, we treat -1 as a special value
1659*4882a593Smuzhiyun * which tells the kernel to clear the limit on this qgroup.
1660*4882a593Smuzhiyun */
1661*4882a593Smuzhiyun const u64 CLEAR_VALUE = -1;
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
1664*4882a593Smuzhiyun if (!fs_info->quota_root) {
1665*4882a593Smuzhiyun ret = -ENOTCONN;
1666*4882a593Smuzhiyun goto out;
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, qgroupid);
1670*4882a593Smuzhiyun if (!qgroup) {
1671*4882a593Smuzhiyun ret = -ENOENT;
1672*4882a593Smuzhiyun goto out;
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
1676*4882a593Smuzhiyun if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1677*4882a593Smuzhiyun if (limit->max_rfer == CLEAR_VALUE) {
1678*4882a593Smuzhiyun qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1679*4882a593Smuzhiyun limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1680*4882a593Smuzhiyun qgroup->max_rfer = 0;
1681*4882a593Smuzhiyun } else {
1682*4882a593Smuzhiyun qgroup->max_rfer = limit->max_rfer;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1686*4882a593Smuzhiyun if (limit->max_excl == CLEAR_VALUE) {
1687*4882a593Smuzhiyun qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1688*4882a593Smuzhiyun limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1689*4882a593Smuzhiyun qgroup->max_excl = 0;
1690*4882a593Smuzhiyun } else {
1691*4882a593Smuzhiyun qgroup->max_excl = limit->max_excl;
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1695*4882a593Smuzhiyun if (limit->rsv_rfer == CLEAR_VALUE) {
1696*4882a593Smuzhiyun qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1697*4882a593Smuzhiyun limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1698*4882a593Smuzhiyun qgroup->rsv_rfer = 0;
1699*4882a593Smuzhiyun } else {
1700*4882a593Smuzhiyun qgroup->rsv_rfer = limit->rsv_rfer;
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1704*4882a593Smuzhiyun if (limit->rsv_excl == CLEAR_VALUE) {
1705*4882a593Smuzhiyun qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1706*4882a593Smuzhiyun limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1707*4882a593Smuzhiyun qgroup->rsv_excl = 0;
1708*4882a593Smuzhiyun } else {
1709*4882a593Smuzhiyun qgroup->rsv_excl = limit->rsv_excl;
1710*4882a593Smuzhiyun }
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun qgroup->lim_flags |= limit->flags;
1713*4882a593Smuzhiyun
1714*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
1715*4882a593Smuzhiyun
1716*4882a593Smuzhiyun ret = update_qgroup_limit_item(trans, qgroup);
1717*4882a593Smuzhiyun if (ret) {
1718*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1719*4882a593Smuzhiyun btrfs_info(fs_info, "unable to update quota limit for %llu",
1720*4882a593Smuzhiyun qgroupid);
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun out:
1724*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
1725*4882a593Smuzhiyun return ret;
1726*4882a593Smuzhiyun }
1727*4882a593Smuzhiyun
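/*
 * Insert @record into the delayed refs' dirty extent rbtree, keyed by
 * bytenr.  Returns 1 if a record for the same bytenr is already queued (the
 * existing entry is kept and may inherit the new record's data reservation),
 * or 0 once the new record has been inserted.  Caller must hold
 * delayed_refs->lock.
 */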
1728*4882a593Smuzhiyun int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1729*4882a593Smuzhiyun struct btrfs_delayed_ref_root *delayed_refs,
1730*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *record)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1733*4882a593Smuzhiyun struct rb_node *parent_node = NULL;
1734*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *entry;
1735*4882a593Smuzhiyun u64 bytenr = record->bytenr;
1736*4882a593Smuzhiyun
1737*4882a593Smuzhiyun lockdep_assert_held(&delayed_refs->lock);
1738*4882a593Smuzhiyun trace_btrfs_qgroup_trace_extent(fs_info, record);
1739*4882a593Smuzhiyun
1740*4882a593Smuzhiyun while (*p) {
1741*4882a593Smuzhiyun parent_node = *p;
1742*4882a593Smuzhiyun entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1743*4882a593Smuzhiyun node);
1744*4882a593Smuzhiyun if (bytenr < entry->bytenr) {
1745*4882a593Smuzhiyun p = &(*p)->rb_left;
1746*4882a593Smuzhiyun } else if (bytenr > entry->bytenr) {
1747*4882a593Smuzhiyun p = &(*p)->rb_right;
1748*4882a593Smuzhiyun } else {
1749*4882a593Smuzhiyun if (record->data_rsv && !entry->data_rsv) {
1750*4882a593Smuzhiyun entry->data_rsv = record->data_rsv;
1751*4882a593Smuzhiyun entry->data_rsv_refroot =
1752*4882a593Smuzhiyun record->data_rsv_refroot;
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun return 1;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun rb_link_node(&record->node, parent_node, p);
1759*4882a593Smuzhiyun rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1760*4882a593Smuzhiyun return 0;
1761*4882a593Smuzhiyun }
1762*4882a593Smuzhiyun
1763*4882a593Smuzhiyun int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1764*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *qrecord)
1765*4882a593Smuzhiyun {
1766*4882a593Smuzhiyun struct ulist *old_root;
1767*4882a593Smuzhiyun u64 bytenr = qrecord->bytenr;
1768*4882a593Smuzhiyun int ret;
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
1771*4882a593Smuzhiyun if (ret < 0) {
1772*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1773*4882a593Smuzhiyun btrfs_warn(fs_info,
1774*4882a593Smuzhiyun "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1775*4882a593Smuzhiyun ret);
1776*4882a593Smuzhiyun return 0;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun /*
1780*4882a593Smuzhiyun * Here we don't need to take the lock of
1781*4882a593Smuzhiyun * trans->transaction->delayed_refs, since the inserted qrecord won't
1782*4882a593Smuzhiyun * be deleted; only qrecord->node may be modified (by a new qrecord insert).
1783*4882a593Smuzhiyun *
1784*4882a593Smuzhiyun * So modifying qrecord->old_roots is safe here.
1785*4882a593Smuzhiyun */
1786*4882a593Smuzhiyun qrecord->old_roots = old_root;
1787*4882a593Smuzhiyun return 0;
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun
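/*
 * Allocate a qgroup extent record for [bytenr, bytenr + num_bytes) and queue
 * it on the current transaction's delayed refs for accounting at commit
 * time.  If a record for the same bytenr is already queued the new
 * allocation is freed again, otherwise the old roots of the extent are
 * looked up right away via btrfs_qgroup_trace_extent_post().
 */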
1790*4882a593Smuzhiyun int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1791*4882a593Smuzhiyun u64 num_bytes, gfp_t gfp_flag)
1792*4882a593Smuzhiyun {
1793*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1794*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *record;
1795*4882a593Smuzhiyun struct btrfs_delayed_ref_root *delayed_refs;
1796*4882a593Smuzhiyun int ret;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1799*4882a593Smuzhiyun || bytenr == 0 || num_bytes == 0)
1800*4882a593Smuzhiyun return 0;
1801*4882a593Smuzhiyun record = kzalloc(sizeof(*record), gfp_flag);
1802*4882a593Smuzhiyun if (!record)
1803*4882a593Smuzhiyun return -ENOMEM;
1804*4882a593Smuzhiyun
1805*4882a593Smuzhiyun delayed_refs = &trans->transaction->delayed_refs;
1806*4882a593Smuzhiyun record->bytenr = bytenr;
1807*4882a593Smuzhiyun record->num_bytes = num_bytes;
1808*4882a593Smuzhiyun record->old_roots = NULL;
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun spin_lock(&delayed_refs->lock);
1811*4882a593Smuzhiyun ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1812*4882a593Smuzhiyun spin_unlock(&delayed_refs->lock);
1813*4882a593Smuzhiyun if (ret > 0) {
1814*4882a593Smuzhiyun kfree(record);
1815*4882a593Smuzhiyun return 0;
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun return btrfs_qgroup_trace_extent_post(fs_info, record);
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1821*4882a593Smuzhiyun struct extent_buffer *eb)
1822*4882a593Smuzhiyun {
1823*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1824*4882a593Smuzhiyun int nr = btrfs_header_nritems(eb);
1825*4882a593Smuzhiyun int i, extent_type, ret;
1826*4882a593Smuzhiyun struct btrfs_key key;
1827*4882a593Smuzhiyun struct btrfs_file_extent_item *fi;
1828*4882a593Smuzhiyun u64 bytenr, num_bytes;
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun /* We can be called directly from walk_up_proc() */
1831*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1832*4882a593Smuzhiyun return 0;
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun for (i = 0; i < nr; i++) {
1835*4882a593Smuzhiyun btrfs_item_key_to_cpu(eb, &key, i);
1836*4882a593Smuzhiyun
1837*4882a593Smuzhiyun if (key.type != BTRFS_EXTENT_DATA_KEY)
1838*4882a593Smuzhiyun continue;
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1841*4882a593Smuzhiyun /* filter out non qgroup-accountable extents */
1842*4882a593Smuzhiyun extent_type = btrfs_file_extent_type(eb, fi);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1845*4882a593Smuzhiyun continue;
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1848*4882a593Smuzhiyun if (!bytenr)
1849*4882a593Smuzhiyun continue;
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
1854*4882a593Smuzhiyun GFP_NOFS);
1855*4882a593Smuzhiyun if (ret)
1856*4882a593Smuzhiyun return ret;
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun cond_resched();
1859*4882a593Smuzhiyun return 0;
1860*4882a593Smuzhiyun }
1861*4882a593Smuzhiyun
1862*4882a593Smuzhiyun /*
1863*4882a593Smuzhiyun * Walk up the tree from the bottom, freeing leaves and any interior
1864*4882a593Smuzhiyun * nodes which have had all slots visited. If a node (leaf or
1865*4882a593Smuzhiyun * interior) is freed, the node above it will have its slot
1866*4882a593Smuzhiyun * incremented. The root node will never be freed.
1867*4882a593Smuzhiyun *
1868*4882a593Smuzhiyun * At the end of this function, we should have a path which has all
1869*4882a593Smuzhiyun * slots incremented to the next position for a search. If we need to
1870*4882a593Smuzhiyun * read a new node it will be NULL and the node above it will have the
1871*4882a593Smuzhiyun * correct slot selected for a later read.
1872*4882a593Smuzhiyun *
1873*4882a593Smuzhiyun * If we increment the root node's slot counter past the number of
1874*4882a593Smuzhiyun * elements, 1 is returned to signal completion of the search.
1875*4882a593Smuzhiyun */
1876*4882a593Smuzhiyun static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun int level = 0;
1879*4882a593Smuzhiyun int nr, slot;
1880*4882a593Smuzhiyun struct extent_buffer *eb;
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun if (root_level == 0)
1883*4882a593Smuzhiyun return 1;
1884*4882a593Smuzhiyun
1885*4882a593Smuzhiyun while (level <= root_level) {
1886*4882a593Smuzhiyun eb = path->nodes[level];
1887*4882a593Smuzhiyun nr = btrfs_header_nritems(eb);
1888*4882a593Smuzhiyun path->slots[level]++;
1889*4882a593Smuzhiyun slot = path->slots[level];
1890*4882a593Smuzhiyun if (slot >= nr || level == 0) {
1891*4882a593Smuzhiyun /*
1892*4882a593Smuzhiyun * Don't free the root - we will detect this
1893*4882a593Smuzhiyun * condition after our loop and return a
1894*4882a593Smuzhiyun * positive value for caller to stop walking the tree.
1895*4882a593Smuzhiyun */
1896*4882a593Smuzhiyun if (level != root_level) {
1897*4882a593Smuzhiyun btrfs_tree_unlock_rw(eb, path->locks[level]);
1898*4882a593Smuzhiyun path->locks[level] = 0;
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun free_extent_buffer(eb);
1901*4882a593Smuzhiyun path->nodes[level] = NULL;
1902*4882a593Smuzhiyun path->slots[level] = 0;
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun } else {
1905*4882a593Smuzhiyun /*
1906*4882a593Smuzhiyun * We have a valid slot to walk back down
1907*4882a593Smuzhiyun * from. Stop here so caller can process these
1908*4882a593Smuzhiyun * new nodes.
1909*4882a593Smuzhiyun */
1910*4882a593Smuzhiyun break;
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun level++;
1914*4882a593Smuzhiyun }
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun eb = path->nodes[root_level];
1917*4882a593Smuzhiyun if (path->slots[root_level] >= btrfs_header_nritems(eb))
1918*4882a593Smuzhiyun return 1;
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun return 0;
1921*4882a593Smuzhiyun }
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun /*
1924*4882a593Smuzhiyun * Helper function to trace a subtree tree block swap.
1925*4882a593Smuzhiyun *
1926*4882a593Smuzhiyun * The swap will happen at the highest tree block, but there may be a lot of
1927*4882a593Smuzhiyun * tree blocks involved.
1928*4882a593Smuzhiyun *
1929*4882a593Smuzhiyun * For example:
1930*4882a593Smuzhiyun * OO = Old tree blocks
1931*4882a593Smuzhiyun * NN = New tree blocks allocated during balance
1932*4882a593Smuzhiyun *
1933*4882a593Smuzhiyun * File tree (257) Reloc tree for 257
1934*4882a593Smuzhiyun * L2 OO NN
1935*4882a593Smuzhiyun * / \ / \
1936*4882a593Smuzhiyun * L1 OO OO (a) OO NN (a)
1937*4882a593Smuzhiyun * / \ / \ / \ / \
1938*4882a593Smuzhiyun * L0 OO OO OO OO OO OO NN NN
1939*4882a593Smuzhiyun * (b) (c) (b) (c)
1940*4882a593Smuzhiyun *
1941*4882a593Smuzhiyun * When calling qgroup_trace_extent_swap(), we will pass:
1942*4882a593Smuzhiyun * @src_eb = OO(a)
1943*4882a593Smuzhiyun * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
1944*4882a593Smuzhiyun * @dst_level = 0
1945*4882a593Smuzhiyun * @root_level = 1
1946*4882a593Smuzhiyun *
1947*4882a593Smuzhiyun * In that case, qgroup_trace_extent_swap() will search from OO(a) to
1948*4882a593Smuzhiyun * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
1949*4882a593Smuzhiyun *
1950*4882a593Smuzhiyun * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
1951*4882a593Smuzhiyun *
1952*4882a593Smuzhiyun * 1) Tree search from @src_eb
1953*4882a593Smuzhiyun * It should act as a simplified btrfs_search_slot().
1954*4882a593Smuzhiyun * The key for search can be extracted from @dst_path->nodes[dst_level]
1955*4882a593Smuzhiyun * (first key).
1956*4882a593Smuzhiyun *
1957*4882a593Smuzhiyun * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
1958*4882a593Smuzhiyun * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
1959*4882a593Smuzhiyun * They should be marked during previous (@dst_level = 1) iteration.
1960*4882a593Smuzhiyun *
1961*4882a593Smuzhiyun * 3) Mark file extents in leaves dirty
1962*4882a593Smuzhiyun * We don't have a good way to pick out only the new file extents,
1963*4882a593Smuzhiyun * so we still follow the old method of scanning all file extents in
1964*4882a593Smuzhiyun * the leaf.
1965*4882a593Smuzhiyun *
1966*4882a593Smuzhiyun * This function frees us from keeping two paths; later we only need to care
1967*4882a593Smuzhiyun * about how to iterate over all the new tree blocks in the reloc tree.
1968*4882a593Smuzhiyun */
1969*4882a593Smuzhiyun static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
1970*4882a593Smuzhiyun struct extent_buffer *src_eb,
1971*4882a593Smuzhiyun struct btrfs_path *dst_path,
1972*4882a593Smuzhiyun int dst_level, int root_level,
1973*4882a593Smuzhiyun bool trace_leaf)
1974*4882a593Smuzhiyun {
1975*4882a593Smuzhiyun struct btrfs_key key;
1976*4882a593Smuzhiyun struct btrfs_path *src_path;
1977*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
1978*4882a593Smuzhiyun u32 nodesize = fs_info->nodesize;
1979*4882a593Smuzhiyun int cur_level = root_level;
1980*4882a593Smuzhiyun int ret;
1981*4882a593Smuzhiyun
1982*4882a593Smuzhiyun BUG_ON(dst_level > root_level);
1983*4882a593Smuzhiyun /* Level mismatch */
1984*4882a593Smuzhiyun if (btrfs_header_level(src_eb) != root_level)
1985*4882a593Smuzhiyun return -EINVAL;
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun src_path = btrfs_alloc_path();
1988*4882a593Smuzhiyun if (!src_path) {
1989*4882a593Smuzhiyun ret = -ENOMEM;
1990*4882a593Smuzhiyun goto out;
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun
1993*4882a593Smuzhiyun if (dst_level)
1994*4882a593Smuzhiyun btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1995*4882a593Smuzhiyun else
1996*4882a593Smuzhiyun btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun /* For src_path */
1999*4882a593Smuzhiyun atomic_inc(&src_eb->refs);
2000*4882a593Smuzhiyun src_path->nodes[root_level] = src_eb;
2001*4882a593Smuzhiyun src_path->slots[root_level] = dst_path->slots[root_level];
2002*4882a593Smuzhiyun src_path->locks[root_level] = 0;
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun /* A simplified version of btrfs_search_slot() */
2005*4882a593Smuzhiyun while (cur_level >= dst_level) {
2006*4882a593Smuzhiyun struct btrfs_key src_key;
2007*4882a593Smuzhiyun struct btrfs_key dst_key;
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun if (src_path->nodes[cur_level] == NULL) {
2010*4882a593Smuzhiyun struct btrfs_key first_key;
2011*4882a593Smuzhiyun struct extent_buffer *eb;
2012*4882a593Smuzhiyun int parent_slot;
2013*4882a593Smuzhiyun u64 child_gen;
2014*4882a593Smuzhiyun u64 child_bytenr;
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun eb = src_path->nodes[cur_level + 1];
2017*4882a593Smuzhiyun parent_slot = src_path->slots[cur_level + 1];
2018*4882a593Smuzhiyun child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2019*4882a593Smuzhiyun child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2020*4882a593Smuzhiyun btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun eb = read_tree_block(fs_info, child_bytenr, child_gen,
2023*4882a593Smuzhiyun cur_level, &first_key);
2024*4882a593Smuzhiyun if (IS_ERR(eb)) {
2025*4882a593Smuzhiyun ret = PTR_ERR(eb);
2026*4882a593Smuzhiyun goto out;
2027*4882a593Smuzhiyun } else if (!extent_buffer_uptodate(eb)) {
2028*4882a593Smuzhiyun free_extent_buffer(eb);
2029*4882a593Smuzhiyun ret = -EIO;
2030*4882a593Smuzhiyun goto out;
2031*4882a593Smuzhiyun }
2032*4882a593Smuzhiyun
2033*4882a593Smuzhiyun src_path->nodes[cur_level] = eb;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun btrfs_tree_read_lock(eb);
2036*4882a593Smuzhiyun btrfs_set_lock_blocking_read(eb);
2037*4882a593Smuzhiyun src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun
2040*4882a593Smuzhiyun src_path->slots[cur_level] = dst_path->slots[cur_level];
2041*4882a593Smuzhiyun if (cur_level) {
2042*4882a593Smuzhiyun btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2043*4882a593Smuzhiyun &dst_key, dst_path->slots[cur_level]);
2044*4882a593Smuzhiyun btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2045*4882a593Smuzhiyun &src_key, src_path->slots[cur_level]);
2046*4882a593Smuzhiyun } else {
2047*4882a593Smuzhiyun btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2048*4882a593Smuzhiyun &dst_key, dst_path->slots[cur_level]);
2049*4882a593Smuzhiyun btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2050*4882a593Smuzhiyun &src_key, src_path->slots[cur_level]);
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun /* Content mismatch, something went wrong */
2053*4882a593Smuzhiyun if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2054*4882a593Smuzhiyun ret = -ENOENT;
2055*4882a593Smuzhiyun goto out;
2056*4882a593Smuzhiyun }
2057*4882a593Smuzhiyun cur_level--;
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun /*
2061*4882a593Smuzhiyun * Now both @dst_path and @src_path have been populated, record the tree
2062*4882a593Smuzhiyun * blocks for qgroup accounting.
2063*4882a593Smuzhiyun */
2064*4882a593Smuzhiyun ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2065*4882a593Smuzhiyun nodesize, GFP_NOFS);
2066*4882a593Smuzhiyun if (ret < 0)
2067*4882a593Smuzhiyun goto out;
2068*4882a593Smuzhiyun ret = btrfs_qgroup_trace_extent(trans,
2069*4882a593Smuzhiyun dst_path->nodes[dst_level]->start,
2070*4882a593Smuzhiyun nodesize, GFP_NOFS);
2071*4882a593Smuzhiyun if (ret < 0)
2072*4882a593Smuzhiyun goto out;
2073*4882a593Smuzhiyun
2074*4882a593Smuzhiyun /* Record leaf file extents */
2075*4882a593Smuzhiyun if (dst_level == 0 && trace_leaf) {
2076*4882a593Smuzhiyun ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2077*4882a593Smuzhiyun if (ret < 0)
2078*4882a593Smuzhiyun goto out;
2079*4882a593Smuzhiyun ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2080*4882a593Smuzhiyun }
2081*4882a593Smuzhiyun out:
2082*4882a593Smuzhiyun btrfs_free_path(src_path);
2083*4882a593Smuzhiyun return ret;
2084*4882a593Smuzhiyun }
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun /*
2087*4882a593Smuzhiyun * Helper function to do recursive generation-aware depth-first search, to
2088*4882a593Smuzhiyun * locate all new tree blocks in a subtree of reloc tree.
2089*4882a593Smuzhiyun *
2090*4882a593Smuzhiyun * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2091*4882a593Smuzhiyun * reloc tree
2092*4882a593Smuzhiyun * L2 NN (a)
2093*4882a593Smuzhiyun * / \
2094*4882a593Smuzhiyun * L1 OO NN (b)
2095*4882a593Smuzhiyun * / \ / \
2096*4882a593Smuzhiyun * L0 OO OO OO NN
2097*4882a593Smuzhiyun * (c) (d)
2098*4882a593Smuzhiyun * If we pass:
2099*4882a593Smuzhiyun * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2100*4882a593Smuzhiyun * @cur_level = 1
2101*4882a593Smuzhiyun * @root_level = 1
2102*4882a593Smuzhiyun *
2103*4882a593Smuzhiyun * We will iterate through tree blocks NN(b) and NN(d), and tell qgroup to
2104*4882a593Smuzhiyun * trace the above tree blocks along with their counterparts in the file tree.
2105*4882a593Smuzhiyun * During the search, old tree blocks such as OO(c) will be skipped, as the
2106*4882a593Smuzhiyun * tree block swap won't affect them.
2107*4882a593Smuzhiyun */
2108*4882a593Smuzhiyun static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2109*4882a593Smuzhiyun struct extent_buffer *src_eb,
2110*4882a593Smuzhiyun struct btrfs_path *dst_path,
2111*4882a593Smuzhiyun int cur_level, int root_level,
2112*4882a593Smuzhiyun u64 last_snapshot, bool trace_leaf)
2113*4882a593Smuzhiyun {
2114*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2115*4882a593Smuzhiyun struct extent_buffer *eb;
2116*4882a593Smuzhiyun bool need_cleanup = false;
2117*4882a593Smuzhiyun int ret = 0;
2118*4882a593Smuzhiyun int i;
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun /* Level sanity check */
2121*4882a593Smuzhiyun if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2122*4882a593Smuzhiyun root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2123*4882a593Smuzhiyun root_level < cur_level) {
2124*4882a593Smuzhiyun btrfs_err_rl(fs_info,
2125*4882a593Smuzhiyun "%s: bad levels, cur_level=%d root_level=%d",
2126*4882a593Smuzhiyun __func__, cur_level, root_level);
2127*4882a593Smuzhiyun return -EUCLEAN;
2128*4882a593Smuzhiyun }
2129*4882a593Smuzhiyun
2130*4882a593Smuzhiyun /* Read the tree block if needed */
2131*4882a593Smuzhiyun if (dst_path->nodes[cur_level] == NULL) {
2132*4882a593Smuzhiyun struct btrfs_key first_key;
2133*4882a593Smuzhiyun int parent_slot;
2134*4882a593Smuzhiyun u64 child_gen;
2135*4882a593Smuzhiyun u64 child_bytenr;
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun /*
2138*4882a593Smuzhiyun * dst_path->nodes[root_level] must be initialized before
2139*4882a593Smuzhiyun * calling this function.
2140*4882a593Smuzhiyun */
2141*4882a593Smuzhiyun if (cur_level == root_level) {
2142*4882a593Smuzhiyun btrfs_err_rl(fs_info,
2143*4882a593Smuzhiyun "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2144*4882a593Smuzhiyun __func__, root_level, root_level, cur_level);
2145*4882a593Smuzhiyun return -EUCLEAN;
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun
2148*4882a593Smuzhiyun /*
2149*4882a593Smuzhiyun * We need to get child blockptr/gen from parent before we can
2150*4882a593Smuzhiyun * read it.
2151*4882a593Smuzhiyun */
2152*4882a593Smuzhiyun eb = dst_path->nodes[cur_level + 1];
2153*4882a593Smuzhiyun parent_slot = dst_path->slots[cur_level + 1];
2154*4882a593Smuzhiyun child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2155*4882a593Smuzhiyun child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2156*4882a593Smuzhiyun btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2157*4882a593Smuzhiyun
2158*4882a593Smuzhiyun /* This node is old, no need to trace */
2159*4882a593Smuzhiyun if (child_gen < last_snapshot)
2160*4882a593Smuzhiyun goto out;
2161*4882a593Smuzhiyun
2162*4882a593Smuzhiyun eb = read_tree_block(fs_info, child_bytenr, child_gen,
2163*4882a593Smuzhiyun cur_level, &first_key);
2164*4882a593Smuzhiyun if (IS_ERR(eb)) {
2165*4882a593Smuzhiyun ret = PTR_ERR(eb);
2166*4882a593Smuzhiyun goto out;
2167*4882a593Smuzhiyun } else if (!extent_buffer_uptodate(eb)) {
2168*4882a593Smuzhiyun free_extent_buffer(eb);
2169*4882a593Smuzhiyun ret = -EIO;
2170*4882a593Smuzhiyun goto out;
2171*4882a593Smuzhiyun }
2172*4882a593Smuzhiyun
2173*4882a593Smuzhiyun dst_path->nodes[cur_level] = eb;
2174*4882a593Smuzhiyun dst_path->slots[cur_level] = 0;
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun btrfs_tree_read_lock(eb);
2177*4882a593Smuzhiyun btrfs_set_lock_blocking_read(eb);
2178*4882a593Smuzhiyun dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
2179*4882a593Smuzhiyun need_cleanup = true;
2180*4882a593Smuzhiyun }
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun /* Now record this tree block and its counterpart for qgroups */
2183*4882a593Smuzhiyun ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2184*4882a593Smuzhiyun root_level, trace_leaf);
2185*4882a593Smuzhiyun if (ret < 0)
2186*4882a593Smuzhiyun goto cleanup;
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun eb = dst_path->nodes[cur_level];
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun if (cur_level > 0) {
2191*4882a593Smuzhiyun /* Iterate all child tree blocks */
2192*4882a593Smuzhiyun for (i = 0; i < btrfs_header_nritems(eb); i++) {
2193*4882a593Smuzhiyun /* Skip old tree blocks as they won't be swapped */
2194*4882a593Smuzhiyun if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2195*4882a593Smuzhiyun continue;
2196*4882a593Smuzhiyun dst_path->slots[cur_level] = i;
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun /* Recursive call (at most 7 times) */
2199*4882a593Smuzhiyun ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2200*4882a593Smuzhiyun dst_path, cur_level - 1, root_level,
2201*4882a593Smuzhiyun last_snapshot, trace_leaf);
2202*4882a593Smuzhiyun if (ret < 0)
2203*4882a593Smuzhiyun goto cleanup;
2204*4882a593Smuzhiyun }
2205*4882a593Smuzhiyun }
2206*4882a593Smuzhiyun
2207*4882a593Smuzhiyun cleanup:
2208*4882a593Smuzhiyun if (need_cleanup) {
2209*4882a593Smuzhiyun /* Clean up */
2210*4882a593Smuzhiyun btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2211*4882a593Smuzhiyun dst_path->locks[cur_level]);
2212*4882a593Smuzhiyun free_extent_buffer(dst_path->nodes[cur_level]);
2213*4882a593Smuzhiyun dst_path->nodes[cur_level] = NULL;
2214*4882a593Smuzhiyun dst_path->slots[cur_level] = 0;
2215*4882a593Smuzhiyun dst_path->locks[cur_level] = 0;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun out:
2218*4882a593Smuzhiyun return ret;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2222*4882a593Smuzhiyun struct extent_buffer *src_eb,
2223*4882a593Smuzhiyun struct extent_buffer *dst_eb,
2224*4882a593Smuzhiyun u64 last_snapshot, bool trace_leaf)
2225*4882a593Smuzhiyun {
2226*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2227*4882a593Smuzhiyun struct btrfs_path *dst_path = NULL;
2228*4882a593Smuzhiyun int level;
2229*4882a593Smuzhiyun int ret;
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2232*4882a593Smuzhiyun return 0;
2233*4882a593Smuzhiyun
2234*4882a593Smuzhiyun /* Wrong parameter order */
2235*4882a593Smuzhiyun if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2236*4882a593Smuzhiyun btrfs_err_rl(fs_info,
2237*4882a593Smuzhiyun "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2238*4882a593Smuzhiyun btrfs_header_generation(src_eb),
2239*4882a593Smuzhiyun btrfs_header_generation(dst_eb));
2240*4882a593Smuzhiyun return -EUCLEAN;
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun
2243*4882a593Smuzhiyun if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2244*4882a593Smuzhiyun ret = -EIO;
2245*4882a593Smuzhiyun goto out;
2246*4882a593Smuzhiyun }
2247*4882a593Smuzhiyun
2248*4882a593Smuzhiyun level = btrfs_header_level(dst_eb);
2249*4882a593Smuzhiyun dst_path = btrfs_alloc_path();
2250*4882a593Smuzhiyun if (!dst_path) {
2251*4882a593Smuzhiyun ret = -ENOMEM;
2252*4882a593Smuzhiyun goto out;
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun /* For dst_path */
2255*4882a593Smuzhiyun atomic_inc(&dst_eb->refs);
2256*4882a593Smuzhiyun dst_path->nodes[level] = dst_eb;
2257*4882a593Smuzhiyun dst_path->slots[level] = 0;
2258*4882a593Smuzhiyun dst_path->locks[level] = 0;
2259*4882a593Smuzhiyun
2260*4882a593Smuzhiyun /* Do the generation-aware depth-first search */
2261*4882a593Smuzhiyun ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2262*4882a593Smuzhiyun level, last_snapshot, trace_leaf);
2263*4882a593Smuzhiyun if (ret < 0)
2264*4882a593Smuzhiyun goto out;
2265*4882a593Smuzhiyun ret = 0;
2266*4882a593Smuzhiyun
2267*4882a593Smuzhiyun out:
2268*4882a593Smuzhiyun btrfs_free_path(dst_path);
2269*4882a593Smuzhiyun if (ret < 0)
2270*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2271*4882a593Smuzhiyun return ret;
2272*4882a593Smuzhiyun }
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2275*4882a593Smuzhiyun struct extent_buffer *root_eb,
2276*4882a593Smuzhiyun u64 root_gen, int root_level)
2277*4882a593Smuzhiyun {
2278*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2279*4882a593Smuzhiyun int ret = 0;
2280*4882a593Smuzhiyun int level;
2281*4882a593Smuzhiyun struct extent_buffer *eb = root_eb;
2282*4882a593Smuzhiyun struct btrfs_path *path = NULL;
2283*4882a593Smuzhiyun
2284*4882a593Smuzhiyun BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2285*4882a593Smuzhiyun BUG_ON(root_eb == NULL);
2286*4882a593Smuzhiyun
2287*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2288*4882a593Smuzhiyun return 0;
2289*4882a593Smuzhiyun
2290*4882a593Smuzhiyun if (!extent_buffer_uptodate(root_eb)) {
2291*4882a593Smuzhiyun ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
2292*4882a593Smuzhiyun if (ret)
2293*4882a593Smuzhiyun goto out;
2294*4882a593Smuzhiyun }
2295*4882a593Smuzhiyun
2296*4882a593Smuzhiyun if (root_level == 0) {
2297*4882a593Smuzhiyun ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2298*4882a593Smuzhiyun goto out;
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun path = btrfs_alloc_path();
2302*4882a593Smuzhiyun if (!path)
2303*4882a593Smuzhiyun return -ENOMEM;
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun /*
2306*4882a593Smuzhiyun * Walk down the tree. Missing extent blocks are filled in as
2307*4882a593Smuzhiyun * we go. Metadata is accounted every time we read a new
2308*4882a593Smuzhiyun * extent block.
2309*4882a593Smuzhiyun *
2310*4882a593Smuzhiyun * When we reach a leaf, we account for file extent items in it,
2311*4882a593Smuzhiyun * walk back up the tree (adjusting slot pointers as we go)
2312*4882a593Smuzhiyun * and restart the search process.
2313*4882a593Smuzhiyun */
2314*4882a593Smuzhiyun atomic_inc(&root_eb->refs); /* For path */
2315*4882a593Smuzhiyun path->nodes[root_level] = root_eb;
2316*4882a593Smuzhiyun path->slots[root_level] = 0;
2317*4882a593Smuzhiyun path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2318*4882a593Smuzhiyun walk_down:
2319*4882a593Smuzhiyun level = root_level;
2320*4882a593Smuzhiyun while (level >= 0) {
2321*4882a593Smuzhiyun if (path->nodes[level] == NULL) {
2322*4882a593Smuzhiyun struct btrfs_key first_key;
2323*4882a593Smuzhiyun int parent_slot;
2324*4882a593Smuzhiyun u64 child_gen;
2325*4882a593Smuzhiyun u64 child_bytenr;
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun /*
2328*4882a593Smuzhiyun * We need to get child blockptr/gen from parent before
2329*4882a593Smuzhiyun * we can read it.
2330*4882a593Smuzhiyun */
2331*4882a593Smuzhiyun eb = path->nodes[level + 1];
2332*4882a593Smuzhiyun parent_slot = path->slots[level + 1];
2333*4882a593Smuzhiyun child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2334*4882a593Smuzhiyun child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2335*4882a593Smuzhiyun btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
2336*4882a593Smuzhiyun
2337*4882a593Smuzhiyun eb = read_tree_block(fs_info, child_bytenr, child_gen,
2338*4882a593Smuzhiyun level, &first_key);
2339*4882a593Smuzhiyun if (IS_ERR(eb)) {
2340*4882a593Smuzhiyun ret = PTR_ERR(eb);
2341*4882a593Smuzhiyun goto out;
2342*4882a593Smuzhiyun } else if (!extent_buffer_uptodate(eb)) {
2343*4882a593Smuzhiyun free_extent_buffer(eb);
2344*4882a593Smuzhiyun ret = -EIO;
2345*4882a593Smuzhiyun goto out;
2346*4882a593Smuzhiyun }
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun path->nodes[level] = eb;
2349*4882a593Smuzhiyun path->slots[level] = 0;
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun btrfs_tree_read_lock(eb);
2352*4882a593Smuzhiyun btrfs_set_lock_blocking_read(eb);
2353*4882a593Smuzhiyun path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2356*4882a593Smuzhiyun fs_info->nodesize,
2357*4882a593Smuzhiyun GFP_NOFS);
2358*4882a593Smuzhiyun if (ret)
2359*4882a593Smuzhiyun goto out;
2360*4882a593Smuzhiyun }
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun if (level == 0) {
2363*4882a593Smuzhiyun ret = btrfs_qgroup_trace_leaf_items(trans,
2364*4882a593Smuzhiyun path->nodes[level]);
2365*4882a593Smuzhiyun if (ret)
2366*4882a593Smuzhiyun goto out;
2367*4882a593Smuzhiyun
2368*4882a593Smuzhiyun /* Nonzero return here means we completed our search */
2369*4882a593Smuzhiyun ret = adjust_slots_upwards(path, root_level);
2370*4882a593Smuzhiyun if (ret)
2371*4882a593Smuzhiyun break;
2372*4882a593Smuzhiyun
2373*4882a593Smuzhiyun /* Restart search with new slots */
2374*4882a593Smuzhiyun goto walk_down;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun
2377*4882a593Smuzhiyun level--;
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun
2380*4882a593Smuzhiyun ret = 0;
2381*4882a593Smuzhiyun out:
2382*4882a593Smuzhiyun btrfs_free_path(path);
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun return ret;
2385*4882a593Smuzhiyun }
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun #define UPDATE_NEW 0
2388*4882a593Smuzhiyun #define UPDATE_OLD 1
2389*4882a593Smuzhiyun /*
2390*4882a593Smuzhiyun * Walk all of the roots that point to the bytenr and adjust their refcnts.
2391*4882a593Smuzhiyun */
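/*
 * Example propagation (illustrative): if root 257 maps to qgroup 0/257,
 * which is a member of 1/100, one root entry bumps the old or new refcnt
 * of both 0/257 and 1/100 by one at @seq, and both groups end up in
 * @qgroups for the later counter update.
 */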
2392*4882a593Smuzhiyun static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2393*4882a593Smuzhiyun struct ulist *roots, struct ulist *tmp,
2394*4882a593Smuzhiyun struct ulist *qgroups, u64 seq, int update_old)
2395*4882a593Smuzhiyun {
2396*4882a593Smuzhiyun struct ulist_node *unode;
2397*4882a593Smuzhiyun struct ulist_iterator uiter;
2398*4882a593Smuzhiyun struct ulist_node *tmp_unode;
2399*4882a593Smuzhiyun struct ulist_iterator tmp_uiter;
2400*4882a593Smuzhiyun struct btrfs_qgroup *qg;
2401*4882a593Smuzhiyun int ret = 0;
2402*4882a593Smuzhiyun
2403*4882a593Smuzhiyun if (!roots)
2404*4882a593Smuzhiyun return 0;
2405*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
2406*4882a593Smuzhiyun while ((unode = ulist_next(roots, &uiter))) {
2407*4882a593Smuzhiyun qg = find_qgroup_rb(fs_info, unode->val);
2408*4882a593Smuzhiyun if (!qg)
2409*4882a593Smuzhiyun continue;
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun ulist_reinit(tmp);
2412*4882a593Smuzhiyun ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2413*4882a593Smuzhiyun GFP_ATOMIC);
2414*4882a593Smuzhiyun if (ret < 0)
2415*4882a593Smuzhiyun return ret;
2416*4882a593Smuzhiyun ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2417*4882a593Smuzhiyun if (ret < 0)
2418*4882a593Smuzhiyun return ret;
2419*4882a593Smuzhiyun ULIST_ITER_INIT(&tmp_uiter);
2420*4882a593Smuzhiyun while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2421*4882a593Smuzhiyun struct btrfs_qgroup_list *glist;
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun qg = unode_aux_to_qgroup(tmp_unode);
2424*4882a593Smuzhiyun if (update_old)
2425*4882a593Smuzhiyun btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2426*4882a593Smuzhiyun else
2427*4882a593Smuzhiyun btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2428*4882a593Smuzhiyun list_for_each_entry(glist, &qg->groups, next_group) {
2429*4882a593Smuzhiyun ret = ulist_add(qgroups, glist->group->qgroupid,
2430*4882a593Smuzhiyun qgroup_to_aux(glist->group),
2431*4882a593Smuzhiyun GFP_ATOMIC);
2432*4882a593Smuzhiyun if (ret < 0)
2433*4882a593Smuzhiyun return ret;
2434*4882a593Smuzhiyun ret = ulist_add(tmp, glist->group->qgroupid,
2435*4882a593Smuzhiyun qgroup_to_aux(glist->group),
2436*4882a593Smuzhiyun GFP_ATOMIC);
2437*4882a593Smuzhiyun if (ret < 0)
2438*4882a593Smuzhiyun return ret;
2439*4882a593Smuzhiyun }
2440*4882a593Smuzhiyun }
2441*4882a593Smuzhiyun }
2442*4882a593Smuzhiyun return 0;
2443*4882a593Smuzhiyun }
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun /*
2446*4882a593Smuzhiyun * Update qgroup rfer/excl counters.
2447*4882a593Smuzhiyun * Rfer update is easy, the code explains itself.
2448*4882a593Smuzhiyun *
2449*4882a593Smuzhiyun * Excl update is tricky, the update is split into 2 parts.
2450*4882a593Smuzhiyun * Part 1: Possible exclusive <-> shared state detection:
2451*4882a593Smuzhiyun * | A | !A |
2452*4882a593Smuzhiyun * -------------------------------------
2453*4882a593Smuzhiyun * B | * | - |
2454*4882a593Smuzhiyun * -------------------------------------
2455*4882a593Smuzhiyun * !B | + | ** |
2456*4882a593Smuzhiyun * -------------------------------------
2457*4882a593Smuzhiyun *
2458*4882a593Smuzhiyun * Conditions:
2459*4882a593Smuzhiyun * A: cur_old_roots < nr_old_roots (not exclusive before)
2460*4882a593Smuzhiyun * !A: cur_old_roots == nr_old_roots (possible exclusive before)
2461*4882a593Smuzhiyun * B: cur_new_roots < nr_new_roots (not exclusive now)
2462*4882a593Smuzhiyun * !B: cur_new_roots == nr_new_roots (possible exclusive now)
2463*4882a593Smuzhiyun *
2464*4882a593Smuzhiyun * Results:
2465*4882a593Smuzhiyun * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
2466*4882a593Smuzhiyun * *: Definitely not changed. **: Possibly unchanged.
2467*4882a593Smuzhiyun *
2468*4882a593Smuzhiyun * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2469*4882a593Smuzhiyun *
2470*4882a593Smuzhiyun * To make the logic clear, we first use conditions A and B to split the
2471*4882a593Smuzhiyun * combination into 4 results.
2472*4882a593Smuzhiyun *
2473*4882a593Smuzhiyun * Then, for results "+" and "-", check the old/new_roots == 0 case, as
2474*4882a593Smuzhiyun * only one variant may be 0 there.
2475*4882a593Smuzhiyun *
2476*4882a593Smuzhiyun * Lastly, check result **, since both variants may be 0, split it
2477*4882a593Smuzhiyun * again (2x2).
2478*4882a593Smuzhiyun * This time we don't need to consider anything else; the code and logic
2479*4882a593Smuzhiyun * are easy to follow now.
2480*4882a593Smuzhiyun */
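/*
 * Worked example (illustrative): with nr_old_roots = 2 and nr_new_roots = 2,
 * a qgroup seeing cur_old_count = 2 and cur_new_count = 1 lands in the
 * "!A, B" cell ("-"): possibly exclusive before, shared now, and since
 * cur_old_count != 0 both excl and excl_cmpr drop by num_bytes.
 */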
2481*4882a593Smuzhiyun static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2482*4882a593Smuzhiyun struct ulist *qgroups,
2483*4882a593Smuzhiyun u64 nr_old_roots,
2484*4882a593Smuzhiyun u64 nr_new_roots,
2485*4882a593Smuzhiyun u64 num_bytes, u64 seq)
2486*4882a593Smuzhiyun {
2487*4882a593Smuzhiyun struct ulist_node *unode;
2488*4882a593Smuzhiyun struct ulist_iterator uiter;
2489*4882a593Smuzhiyun struct btrfs_qgroup *qg;
2490*4882a593Smuzhiyun u64 cur_new_count, cur_old_count;
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
2493*4882a593Smuzhiyun while ((unode = ulist_next(qgroups, &uiter))) {
2494*4882a593Smuzhiyun bool dirty = false;
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun qg = unode_aux_to_qgroup(unode);
2497*4882a593Smuzhiyun cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2498*4882a593Smuzhiyun cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2501*4882a593Smuzhiyun cur_new_count);
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun /* Rfer update part */
2504*4882a593Smuzhiyun if (cur_old_count == 0 && cur_new_count > 0) {
2505*4882a593Smuzhiyun qg->rfer += num_bytes;
2506*4882a593Smuzhiyun qg->rfer_cmpr += num_bytes;
2507*4882a593Smuzhiyun dirty = true;
2508*4882a593Smuzhiyun }
2509*4882a593Smuzhiyun if (cur_old_count > 0 && cur_new_count == 0) {
2510*4882a593Smuzhiyun qg->rfer -= num_bytes;
2511*4882a593Smuzhiyun qg->rfer_cmpr -= num_bytes;
2512*4882a593Smuzhiyun dirty = true;
2513*4882a593Smuzhiyun }
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun /* Excl update part */
2516*4882a593Smuzhiyun /* Exclusive/none -> shared case */
2517*4882a593Smuzhiyun if (cur_old_count == nr_old_roots &&
2518*4882a593Smuzhiyun cur_new_count < nr_new_roots) {
2519*4882a593Smuzhiyun /* Exclusive -> shared */
2520*4882a593Smuzhiyun if (cur_old_count != 0) {
2521*4882a593Smuzhiyun qg->excl -= num_bytes;
2522*4882a593Smuzhiyun qg->excl_cmpr -= num_bytes;
2523*4882a593Smuzhiyun dirty = true;
2524*4882a593Smuzhiyun }
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun /* Shared -> exclusive/none case */
2528*4882a593Smuzhiyun if (cur_old_count < nr_old_roots &&
2529*4882a593Smuzhiyun cur_new_count == nr_new_roots) {
2530*4882a593Smuzhiyun /* Shared->exclusive */
2531*4882a593Smuzhiyun if (cur_new_count != 0) {
2532*4882a593Smuzhiyun qg->excl += num_bytes;
2533*4882a593Smuzhiyun qg->excl_cmpr += num_bytes;
2534*4882a593Smuzhiyun dirty = true;
2535*4882a593Smuzhiyun }
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun
2538*4882a593Smuzhiyun /* Exclusive/none -> exclusive/none case */
2539*4882a593Smuzhiyun if (cur_old_count == nr_old_roots &&
2540*4882a593Smuzhiyun cur_new_count == nr_new_roots) {
2541*4882a593Smuzhiyun if (cur_old_count == 0) {
2542*4882a593Smuzhiyun /* None -> exclusive/none */
2543*4882a593Smuzhiyun
2544*4882a593Smuzhiyun if (cur_new_count != 0) {
2545*4882a593Smuzhiyun /* None -> exclusive */
2546*4882a593Smuzhiyun qg->excl += num_bytes;
2547*4882a593Smuzhiyun qg->excl_cmpr += num_bytes;
2548*4882a593Smuzhiyun dirty = true;
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun /* None -> none, nothing changed */
2551*4882a593Smuzhiyun } else {
2552*4882a593Smuzhiyun /* Exclusive -> exclusive/none */
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun if (cur_new_count == 0) {
2555*4882a593Smuzhiyun /* Exclusive -> none */
2556*4882a593Smuzhiyun qg->excl -= num_bytes;
2557*4882a593Smuzhiyun qg->excl_cmpr -= num_bytes;
2558*4882a593Smuzhiyun dirty = true;
2559*4882a593Smuzhiyun }
2560*4882a593Smuzhiyun /* Exclusive -> exclusive, nothing changed */
2561*4882a593Smuzhiyun }
2562*4882a593Smuzhiyun }
2563*4882a593Smuzhiyun
2564*4882a593Smuzhiyun if (dirty)
2565*4882a593Smuzhiyun qgroup_dirty(fs_info, qg);
2566*4882a593Smuzhiyun }
2567*4882a593Smuzhiyun return 0;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun
2570*4882a593Smuzhiyun /*
2571*4882a593Smuzhiyun * Check if @roots could potentially be a list of fs tree roots
2572*4882a593Smuzhiyun *
2573*4882a593Smuzhiyun * Return 0 if the ulist definitely contains no fs/subvol tree roots
2574*4882a593Smuzhiyun * Return 1 if the list may contain fs/subvol tree roots (an empty list
2575*4882a593Smuzhiyun * counts as well)
2576*4882a593Smuzhiyun */
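/*
 * For instance (illustrative): a ulist whose first entry is a subvolume
 * id such as 5 or 256 yields 1, while one starting with the extent tree
 * (id 2) yields 0; checking the first node is enough because fs and
 * non-fs trees never share the same extent.
 */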
2577*4882a593Smuzhiyun static int maybe_fs_roots(struct ulist *roots)
2578*4882a593Smuzhiyun {
2579*4882a593Smuzhiyun struct ulist_node *unode;
2580*4882a593Smuzhiyun struct ulist_iterator uiter;
2581*4882a593Smuzhiyun
2582*4882a593Smuzhiyun /* Empty one, still possible for fs roots */
2583*4882a593Smuzhiyun if (!roots || roots->nnodes == 0)
2584*4882a593Smuzhiyun return 1;
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
2587*4882a593Smuzhiyun unode = ulist_next(roots, &uiter);
2588*4882a593Smuzhiyun if (!unode)
2589*4882a593Smuzhiyun return 1;
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun /*
2592*4882a593Smuzhiyun * If it contains fs tree roots, then it must belong to fs/subvol
2593*4882a593Smuzhiyun * trees.
2594*4882a593Smuzhiyun * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2595*4882a593Smuzhiyun */
2596*4882a593Smuzhiyun return is_fstree(unode->val);
2597*4882a593Smuzhiyun }
2598*4882a593Smuzhiyun
2599*4882a593Smuzhiyun int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2600*4882a593Smuzhiyun u64 num_bytes, struct ulist *old_roots,
2601*4882a593Smuzhiyun struct ulist *new_roots)
2602*4882a593Smuzhiyun {
2603*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2604*4882a593Smuzhiyun struct ulist *qgroups = NULL;
2605*4882a593Smuzhiyun struct ulist *tmp = NULL;
2606*4882a593Smuzhiyun u64 seq;
2607*4882a593Smuzhiyun u64 nr_new_roots = 0;
2608*4882a593Smuzhiyun u64 nr_old_roots = 0;
2609*4882a593Smuzhiyun int ret = 0;
2610*4882a593Smuzhiyun
2611*4882a593Smuzhiyun /*
2612*4882a593Smuzhiyun * If quotas get disabled meanwhile, the resources need to be freed and
2613*4882a593Smuzhiyun * we can't just exit here.
2614*4882a593Smuzhiyun */
2615*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2616*4882a593Smuzhiyun goto out_free;
2617*4882a593Smuzhiyun
2618*4882a593Smuzhiyun if (new_roots) {
2619*4882a593Smuzhiyun if (!maybe_fs_roots(new_roots))
2620*4882a593Smuzhiyun goto out_free;
2621*4882a593Smuzhiyun nr_new_roots = new_roots->nnodes;
2622*4882a593Smuzhiyun }
2623*4882a593Smuzhiyun if (old_roots) {
2624*4882a593Smuzhiyun if (!maybe_fs_roots(old_roots))
2625*4882a593Smuzhiyun goto out_free;
2626*4882a593Smuzhiyun nr_old_roots = old_roots->nnodes;
2627*4882a593Smuzhiyun }
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2630*4882a593Smuzhiyun if (nr_old_roots == 0 && nr_new_roots == 0)
2631*4882a593Smuzhiyun goto out_free;
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun BUG_ON(!fs_info->quota_root);
2634*4882a593Smuzhiyun
2635*4882a593Smuzhiyun trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2636*4882a593Smuzhiyun num_bytes, nr_old_roots, nr_new_roots);
2637*4882a593Smuzhiyun
2638*4882a593Smuzhiyun qgroups = ulist_alloc(GFP_NOFS);
2639*4882a593Smuzhiyun if (!qgroups) {
2640*4882a593Smuzhiyun ret = -ENOMEM;
2641*4882a593Smuzhiyun goto out_free;
2642*4882a593Smuzhiyun }
2643*4882a593Smuzhiyun tmp = ulist_alloc(GFP_NOFS);
2644*4882a593Smuzhiyun if (!tmp) {
2645*4882a593Smuzhiyun ret = -ENOMEM;
2646*4882a593Smuzhiyun goto out_free;
2647*4882a593Smuzhiyun }
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
2650*4882a593Smuzhiyun if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2651*4882a593Smuzhiyun if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2652*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
2653*4882a593Smuzhiyun ret = 0;
2654*4882a593Smuzhiyun goto out_free;
2655*4882a593Smuzhiyun }
2656*4882a593Smuzhiyun }
2657*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
2660*4882a593Smuzhiyun seq = fs_info->qgroup_seq;
2661*4882a593Smuzhiyun
2662*4882a593Smuzhiyun /* Update old refcnts using old_roots */
2663*4882a593Smuzhiyun ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2664*4882a593Smuzhiyun UPDATE_OLD);
2665*4882a593Smuzhiyun if (ret < 0)
2666*4882a593Smuzhiyun goto out;
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun /* Update new refcnts using new_roots */
2669*4882a593Smuzhiyun ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2670*4882a593Smuzhiyun UPDATE_NEW);
2671*4882a593Smuzhiyun if (ret < 0)
2672*4882a593Smuzhiyun goto out;
2673*4882a593Smuzhiyun
2674*4882a593Smuzhiyun qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2675*4882a593Smuzhiyun num_bytes, seq);
2676*4882a593Smuzhiyun
2677*4882a593Smuzhiyun /*
2678*4882a593Smuzhiyun * Bump qgroup_seq to avoid seq overlap
2679*4882a593Smuzhiyun */
2680*4882a593Smuzhiyun fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
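/*
 * Illustrative note: every root walked above advanced some qgroup refcnt
 * to at most seq + max(nr_old_roots, nr_new_roots), so the next accounting
 * round must start its sequence beyond that value.
 */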
2681*4882a593Smuzhiyun out:
2682*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
2683*4882a593Smuzhiyun out_free:
2684*4882a593Smuzhiyun ulist_free(tmp);
2685*4882a593Smuzhiyun ulist_free(qgroups);
2686*4882a593Smuzhiyun ulist_free(old_roots);
2687*4882a593Smuzhiyun ulist_free(new_roots);
2688*4882a593Smuzhiyun return ret;
2689*4882a593Smuzhiyun }
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2692*4882a593Smuzhiyun {
2693*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2694*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *record;
2695*4882a593Smuzhiyun struct btrfs_delayed_ref_root *delayed_refs;
2696*4882a593Smuzhiyun struct ulist *new_roots = NULL;
2697*4882a593Smuzhiyun struct rb_node *node;
2698*4882a593Smuzhiyun u64 num_dirty_extents = 0;
2699*4882a593Smuzhiyun u64 qgroup_to_skip;
2700*4882a593Smuzhiyun int ret = 0;
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun delayed_refs = &trans->transaction->delayed_refs;
2703*4882a593Smuzhiyun qgroup_to_skip = delayed_refs->qgroup_to_skip;
2704*4882a593Smuzhiyun while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2705*4882a593Smuzhiyun record = rb_entry(node, struct btrfs_qgroup_extent_record,
2706*4882a593Smuzhiyun node);
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun num_dirty_extents++;
2709*4882a593Smuzhiyun trace_btrfs_qgroup_account_extents(fs_info, record);
2710*4882a593Smuzhiyun
2711*4882a593Smuzhiyun if (!ret) {
2712*4882a593Smuzhiyun /*
2713*4882a593Smuzhiyun * Old roots should be searched when inserting qgroup
2714*4882a593Smuzhiyun * extent record
2715*4882a593Smuzhiyun */
2716*4882a593Smuzhiyun if (WARN_ON(!record->old_roots)) {
2717*4882a593Smuzhiyun /* Search commit root to find old_roots */
2718*4882a593Smuzhiyun ret = btrfs_find_all_roots(NULL, fs_info,
2719*4882a593Smuzhiyun record->bytenr, 0,
2720*4882a593Smuzhiyun &record->old_roots, false);
2721*4882a593Smuzhiyun if (ret < 0)
2722*4882a593Smuzhiyun goto cleanup;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun /* Free the reserved data space */
2726*4882a593Smuzhiyun btrfs_qgroup_free_refroot(fs_info,
2727*4882a593Smuzhiyun record->data_rsv_refroot,
2728*4882a593Smuzhiyun record->data_rsv,
2729*4882a593Smuzhiyun BTRFS_QGROUP_RSV_DATA);
2730*4882a593Smuzhiyun /*
2731*4882a593Smuzhiyun * Use SEQ_LAST as time_seq to do special search, which
2732*4882a593Smuzhiyun * doesn't lock tree or delayed_refs and search current
2733*4882a593Smuzhiyun * root. It's safe inside commit_transaction().
2734*4882a593Smuzhiyun */
2735*4882a593Smuzhiyun ret = btrfs_find_all_roots(trans, fs_info,
2736*4882a593Smuzhiyun record->bytenr, SEQ_LAST, &new_roots, false);
2737*4882a593Smuzhiyun if (ret < 0)
2738*4882a593Smuzhiyun goto cleanup;
2739*4882a593Smuzhiyun if (qgroup_to_skip) {
2740*4882a593Smuzhiyun ulist_del(new_roots, qgroup_to_skip, 0);
2741*4882a593Smuzhiyun ulist_del(record->old_roots, qgroup_to_skip,
2742*4882a593Smuzhiyun 0);
2743*4882a593Smuzhiyun }
2744*4882a593Smuzhiyun ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2745*4882a593Smuzhiyun record->num_bytes,
2746*4882a593Smuzhiyun record->old_roots,
2747*4882a593Smuzhiyun new_roots);
2748*4882a593Smuzhiyun record->old_roots = NULL;
2749*4882a593Smuzhiyun new_roots = NULL;
2750*4882a593Smuzhiyun }
2751*4882a593Smuzhiyun cleanup:
2752*4882a593Smuzhiyun ulist_free(record->old_roots);
2753*4882a593Smuzhiyun ulist_free(new_roots);
2754*4882a593Smuzhiyun new_roots = NULL;
2755*4882a593Smuzhiyun rb_erase(node, &delayed_refs->dirty_extent_root);
2756*4882a593Smuzhiyun kfree(record);
2757*4882a593Smuzhiyun
2758*4882a593Smuzhiyun }
2759*4882a593Smuzhiyun trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2760*4882a593Smuzhiyun num_dirty_extents);
2761*4882a593Smuzhiyun return ret;
2762*4882a593Smuzhiyun }
2763*4882a593Smuzhiyun
2764*4882a593Smuzhiyun /*
2765*4882a593Smuzhiyun * called from commit_transaction. Writes all changed qgroups to disk.
2766*4882a593Smuzhiyun */
2767*4882a593Smuzhiyun int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2768*4882a593Smuzhiyun {
2769*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2770*4882a593Smuzhiyun int ret = 0;
2771*4882a593Smuzhiyun
2772*4882a593Smuzhiyun if (!fs_info->quota_root)
2773*4882a593Smuzhiyun return ret;
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
2776*4882a593Smuzhiyun while (!list_empty(&fs_info->dirty_qgroups)) {
2777*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
2778*4882a593Smuzhiyun qgroup = list_first_entry(&fs_info->dirty_qgroups,
2779*4882a593Smuzhiyun struct btrfs_qgroup, dirty);
2780*4882a593Smuzhiyun list_del_init(&qgroup->dirty);
2781*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
2782*4882a593Smuzhiyun ret = update_qgroup_info_item(trans, qgroup);
2783*4882a593Smuzhiyun if (ret)
2784*4882a593Smuzhiyun fs_info->qgroup_flags |=
2785*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2786*4882a593Smuzhiyun ret = update_qgroup_limit_item(trans, qgroup);
2787*4882a593Smuzhiyun if (ret)
2788*4882a593Smuzhiyun fs_info->qgroup_flags |=
2789*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2790*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
2791*4882a593Smuzhiyun }
2792*4882a593Smuzhiyun if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2793*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2794*4882a593Smuzhiyun else
2795*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2796*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
2797*4882a593Smuzhiyun
2798*4882a593Smuzhiyun ret = update_qgroup_status_item(trans);
2799*4882a593Smuzhiyun if (ret)
2800*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun return ret;
2803*4882a593Smuzhiyun }
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun /*
2806*4882a593Smuzhiyun * Copy the accounting information between qgroups. This is necessary
2807*4882a593Smuzhiyun * when a snapshot or a subvolume is created. Throwing an error will
2808*4882a593Smuzhiyun * cause a transaction abort so we take extra care here to only error
2809*4882a593Smuzhiyun * when a readonly fs is a reasonable outcome.
2810*4882a593Smuzhiyun */
2811*4882a593Smuzhiyun int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2812*4882a593Smuzhiyun u64 objectid, struct btrfs_qgroup_inherit *inherit)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun int ret = 0;
2815*4882a593Smuzhiyun int i;
2816*4882a593Smuzhiyun u64 *i_qgroups;
2817*4882a593Smuzhiyun bool committing = false;
2818*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
2819*4882a593Smuzhiyun struct btrfs_root *quota_root;
2820*4882a593Smuzhiyun struct btrfs_qgroup *srcgroup;
2821*4882a593Smuzhiyun struct btrfs_qgroup *dstgroup;
2822*4882a593Smuzhiyun bool need_rescan = false;
2823*4882a593Smuzhiyun u32 level_size = 0;
2824*4882a593Smuzhiyun u64 nums;
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun /*
2827*4882a593Smuzhiyun * There are only two callers of this function.
2828*4882a593Smuzhiyun *
2829*4882a593Smuzhiyun * One in create_subvol() in the ioctl context, which needs to hold
2830*4882a593Smuzhiyun * the qgroup_ioctl_lock.
2831*4882a593Smuzhiyun *
2832*4882a593Smuzhiyun * The other one in create_pending_snapshot() where no other qgroup
2833*4882a593Smuzhiyun * code can modify the fs as they all need to either start a new trans
2834*4882a593Smuzhiyun * or hold a trans handle, thus we don't need to hold
2835*4882a593Smuzhiyun * qgroup_ioctl_lock.
2836*4882a593Smuzhiyun * This avoids a long and complex lock chain and keeps lockdep happy.
2837*4882a593Smuzhiyun */
2838*4882a593Smuzhiyun spin_lock(&fs_info->trans_lock);
2839*4882a593Smuzhiyun if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2840*4882a593Smuzhiyun committing = true;
2841*4882a593Smuzhiyun spin_unlock(&fs_info->trans_lock);
2842*4882a593Smuzhiyun
2843*4882a593Smuzhiyun if (!committing)
2844*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_ioctl_lock);
2845*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2846*4882a593Smuzhiyun goto out;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun quota_root = fs_info->quota_root;
2849*4882a593Smuzhiyun if (!quota_root) {
2850*4882a593Smuzhiyun ret = -EINVAL;
2851*4882a593Smuzhiyun goto out;
2852*4882a593Smuzhiyun }
2853*4882a593Smuzhiyun
2854*4882a593Smuzhiyun if (inherit) {
2855*4882a593Smuzhiyun i_qgroups = (u64 *)(inherit + 1);
2856*4882a593Smuzhiyun nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2857*4882a593Smuzhiyun 2 * inherit->num_excl_copies;
2858*4882a593Smuzhiyun for (i = 0; i < nums; ++i) {
2859*4882a593Smuzhiyun srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2860*4882a593Smuzhiyun
2861*4882a593Smuzhiyun /*
2862*4882a593Smuzhiyun * Zero out invalid groups so we can ignore
2863*4882a593Smuzhiyun * them later.
2864*4882a593Smuzhiyun */
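/*
 * Note (assumption for the check below): the top 16 bits of a qgroupid
 * encode its level, so only groups at a higher level than the new
 * subvolume survive here and can be inherited from.
 */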
2865*4882a593Smuzhiyun if (!srcgroup ||
2866*4882a593Smuzhiyun ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2867*4882a593Smuzhiyun *i_qgroups = 0ULL;
2868*4882a593Smuzhiyun
2869*4882a593Smuzhiyun ++i_qgroups;
2870*4882a593Smuzhiyun }
2871*4882a593Smuzhiyun }
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun /*
2874*4882a593Smuzhiyun * create a tracking group for the subvol itself
2875*4882a593Smuzhiyun */
2876*4882a593Smuzhiyun ret = add_qgroup_item(trans, quota_root, objectid);
2877*4882a593Smuzhiyun if (ret)
2878*4882a593Smuzhiyun goto out;
2879*4882a593Smuzhiyun
2880*4882a593Smuzhiyun /*
2881*4882a593Smuzhiyun * add qgroup to all inherited groups
2882*4882a593Smuzhiyun */
2883*4882a593Smuzhiyun if (inherit) {
2884*4882a593Smuzhiyun i_qgroups = (u64 *)(inherit + 1);
2885*4882a593Smuzhiyun for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2886*4882a593Smuzhiyun if (*i_qgroups == 0)
2887*4882a593Smuzhiyun continue;
2888*4882a593Smuzhiyun ret = add_qgroup_relation_item(trans, objectid,
2889*4882a593Smuzhiyun *i_qgroups);
2890*4882a593Smuzhiyun if (ret && ret != -EEXIST)
2891*4882a593Smuzhiyun goto out;
2892*4882a593Smuzhiyun ret = add_qgroup_relation_item(trans, *i_qgroups,
2893*4882a593Smuzhiyun objectid);
2894*4882a593Smuzhiyun if (ret && ret != -EEXIST)
2895*4882a593Smuzhiyun goto out;
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun ret = 0;
2898*4882a593Smuzhiyun }
2899*4882a593Smuzhiyun
2900*4882a593Smuzhiyun
2901*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
2902*4882a593Smuzhiyun
2903*4882a593Smuzhiyun dstgroup = add_qgroup_rb(fs_info, objectid);
2904*4882a593Smuzhiyun if (IS_ERR(dstgroup)) {
2905*4882a593Smuzhiyun ret = PTR_ERR(dstgroup);
2906*4882a593Smuzhiyun goto unlock;
2907*4882a593Smuzhiyun }
2908*4882a593Smuzhiyun
2909*4882a593Smuzhiyun if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2910*4882a593Smuzhiyun dstgroup->lim_flags = inherit->lim.flags;
2911*4882a593Smuzhiyun dstgroup->max_rfer = inherit->lim.max_rfer;
2912*4882a593Smuzhiyun dstgroup->max_excl = inherit->lim.max_excl;
2913*4882a593Smuzhiyun dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2914*4882a593Smuzhiyun dstgroup->rsv_excl = inherit->lim.rsv_excl;
2915*4882a593Smuzhiyun
2916*4882a593Smuzhiyun qgroup_dirty(fs_info, dstgroup);
2917*4882a593Smuzhiyun }
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun if (srcid) {
2920*4882a593Smuzhiyun srcgroup = find_qgroup_rb(fs_info, srcid);
2921*4882a593Smuzhiyun if (!srcgroup)
2922*4882a593Smuzhiyun goto unlock;
2923*4882a593Smuzhiyun
2924*4882a593Smuzhiyun /*
2925*4882a593Smuzhiyun * We call inherit after we clone the root in order to make sure
2926*4882a593Smuzhiyun * our counts don't go crazy, so at this point the only
2927*4882a593Smuzhiyun * difference between the two roots should be the root node.
2928*4882a593Smuzhiyun */
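/*
 * E.g. (illustrative): with a 16K nodesize, source and snapshot share
 * every extent except their own root nodes, so each keeps the shared
 * rfer value but only 16K of exclusive space.
 */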
2929*4882a593Smuzhiyun level_size = fs_info->nodesize;
2930*4882a593Smuzhiyun dstgroup->rfer = srcgroup->rfer;
2931*4882a593Smuzhiyun dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2932*4882a593Smuzhiyun dstgroup->excl = level_size;
2933*4882a593Smuzhiyun dstgroup->excl_cmpr = level_size;
2934*4882a593Smuzhiyun srcgroup->excl = level_size;
2935*4882a593Smuzhiyun srcgroup->excl_cmpr = level_size;
2936*4882a593Smuzhiyun
2937*4882a593Smuzhiyun /* inherit the limit info */
2938*4882a593Smuzhiyun dstgroup->lim_flags = srcgroup->lim_flags;
2939*4882a593Smuzhiyun dstgroup->max_rfer = srcgroup->max_rfer;
2940*4882a593Smuzhiyun dstgroup->max_excl = srcgroup->max_excl;
2941*4882a593Smuzhiyun dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2942*4882a593Smuzhiyun dstgroup->rsv_excl = srcgroup->rsv_excl;
2943*4882a593Smuzhiyun
2944*4882a593Smuzhiyun qgroup_dirty(fs_info, dstgroup);
2945*4882a593Smuzhiyun qgroup_dirty(fs_info, srcgroup);
2946*4882a593Smuzhiyun }
2947*4882a593Smuzhiyun
2948*4882a593Smuzhiyun if (!inherit)
2949*4882a593Smuzhiyun goto unlock;
2950*4882a593Smuzhiyun
2951*4882a593Smuzhiyun i_qgroups = (u64 *)(inherit + 1);
2952*4882a593Smuzhiyun for (i = 0; i < inherit->num_qgroups; ++i) {
2953*4882a593Smuzhiyun if (*i_qgroups) {
2954*4882a593Smuzhiyun ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2955*4882a593Smuzhiyun if (ret)
2956*4882a593Smuzhiyun goto unlock;
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun ++i_qgroups;
2959*4882a593Smuzhiyun
2960*4882a593Smuzhiyun /*
2961*4882a593Smuzhiyun * If we're doing a snapshot, and adding the snapshot to a new
2962*4882a593Smuzhiyun * qgroup, the numbers are guaranteed to be incorrect.
2963*4882a593Smuzhiyun */
2964*4882a593Smuzhiyun if (srcid)
2965*4882a593Smuzhiyun need_rescan = true;
2966*4882a593Smuzhiyun }
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2969*4882a593Smuzhiyun struct btrfs_qgroup *src;
2970*4882a593Smuzhiyun struct btrfs_qgroup *dst;
2971*4882a593Smuzhiyun
2972*4882a593Smuzhiyun if (!i_qgroups[0] || !i_qgroups[1])
2973*4882a593Smuzhiyun continue;
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun src = find_qgroup_rb(fs_info, i_qgroups[0]);
2976*4882a593Smuzhiyun dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2977*4882a593Smuzhiyun
2978*4882a593Smuzhiyun if (!src || !dst) {
2979*4882a593Smuzhiyun ret = -EINVAL;
2980*4882a593Smuzhiyun goto unlock;
2981*4882a593Smuzhiyun }
2982*4882a593Smuzhiyun
2983*4882a593Smuzhiyun dst->rfer = src->rfer - level_size;
2984*4882a593Smuzhiyun dst->rfer_cmpr = src->rfer_cmpr - level_size;
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun /* Manually tweaking numbers certainly needs a rescan */
2987*4882a593Smuzhiyun need_rescan = true;
2988*4882a593Smuzhiyun }
2989*4882a593Smuzhiyun for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2990*4882a593Smuzhiyun struct btrfs_qgroup *src;
2991*4882a593Smuzhiyun struct btrfs_qgroup *dst;
2992*4882a593Smuzhiyun
2993*4882a593Smuzhiyun if (!i_qgroups[0] || !i_qgroups[1])
2994*4882a593Smuzhiyun continue;
2995*4882a593Smuzhiyun
2996*4882a593Smuzhiyun src = find_qgroup_rb(fs_info, i_qgroups[0]);
2997*4882a593Smuzhiyun dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun if (!src || !dst) {
3000*4882a593Smuzhiyun ret = -EINVAL;
3001*4882a593Smuzhiyun goto unlock;
3002*4882a593Smuzhiyun }
3003*4882a593Smuzhiyun
3004*4882a593Smuzhiyun dst->excl = src->excl + level_size;
3005*4882a593Smuzhiyun dst->excl_cmpr = src->excl_cmpr + level_size;
3006*4882a593Smuzhiyun need_rescan = true;
3007*4882a593Smuzhiyun }
3008*4882a593Smuzhiyun
3009*4882a593Smuzhiyun unlock:
3010*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
3011*4882a593Smuzhiyun if (!ret)
3012*4882a593Smuzhiyun ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3013*4882a593Smuzhiyun out:
3014*4882a593Smuzhiyun if (!committing)
3015*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_ioctl_lock);
3016*4882a593Smuzhiyun if (need_rescan)
3017*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3018*4882a593Smuzhiyun return ret;
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun
3021*4882a593Smuzhiyun static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3022*4882a593Smuzhiyun {
3023*4882a593Smuzhiyun if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3024*4882a593Smuzhiyun qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3025*4882a593Smuzhiyun return false;
3026*4882a593Smuzhiyun
3027*4882a593Smuzhiyun if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3028*4882a593Smuzhiyun qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3029*4882a593Smuzhiyun return false;
3030*4882a593Smuzhiyun
3031*4882a593Smuzhiyun return true;
3032*4882a593Smuzhiyun }
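/*
 * Numeric sketch (illustrative): with max_rfer = 1 MiB, rfer = 768 KiB
 * and 192 KiB already reserved, asking for another 128 KiB gives
 * 768K + 192K + 128K > 1M, so the check fails and the caller returns
 * -EDQUOT.
 */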
3033*4882a593Smuzhiyun
3034*4882a593Smuzhiyun static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3035*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type)
3036*4882a593Smuzhiyun {
3037*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
3038*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
3039*4882a593Smuzhiyun u64 ref_root = root->root_key.objectid;
3040*4882a593Smuzhiyun int ret = 0;
3041*4882a593Smuzhiyun struct ulist_node *unode;
3042*4882a593Smuzhiyun struct ulist_iterator uiter;
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun if (!is_fstree(ref_root))
3045*4882a593Smuzhiyun return 0;
3046*4882a593Smuzhiyun
3047*4882a593Smuzhiyun if (num_bytes == 0)
3048*4882a593Smuzhiyun return 0;
3049*4882a593Smuzhiyun
3050*4882a593Smuzhiyun if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3051*4882a593Smuzhiyun capable(CAP_SYS_RESOURCE))
3052*4882a593Smuzhiyun enforce = false;
3053*4882a593Smuzhiyun
3054*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
3055*4882a593Smuzhiyun if (!fs_info->quota_root)
3056*4882a593Smuzhiyun goto out;
3057*4882a593Smuzhiyun
3058*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, ref_root);
3059*4882a593Smuzhiyun if (!qgroup)
3060*4882a593Smuzhiyun goto out;
3061*4882a593Smuzhiyun
3062*4882a593Smuzhiyun /*
3063*4882a593Smuzhiyun * In a first step, check all affected qgroups to see whether any limit
3064*4882a593Smuzhiyun * would be exceeded
3065*4882a593Smuzhiyun */
3066*4882a593Smuzhiyun ulist_reinit(fs_info->qgroup_ulist);
3067*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3068*4882a593Smuzhiyun qgroup_to_aux(qgroup), GFP_ATOMIC);
3069*4882a593Smuzhiyun if (ret < 0)
3070*4882a593Smuzhiyun goto out;
3071*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
3072*4882a593Smuzhiyun while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3073*4882a593Smuzhiyun struct btrfs_qgroup *qg;
3074*4882a593Smuzhiyun struct btrfs_qgroup_list *glist;
3075*4882a593Smuzhiyun
3076*4882a593Smuzhiyun qg = unode_aux_to_qgroup(unode);
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun if (enforce && !qgroup_check_limits(qg, num_bytes)) {
3079*4882a593Smuzhiyun ret = -EDQUOT;
3080*4882a593Smuzhiyun goto out;
3081*4882a593Smuzhiyun }
3082*4882a593Smuzhiyun
3083*4882a593Smuzhiyun list_for_each_entry(glist, &qg->groups, next_group) {
3084*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist,
3085*4882a593Smuzhiyun glist->group->qgroupid,
3086*4882a593Smuzhiyun qgroup_to_aux(glist->group), GFP_ATOMIC);
3087*4882a593Smuzhiyun if (ret < 0)
3088*4882a593Smuzhiyun goto out;
3089*4882a593Smuzhiyun }
3090*4882a593Smuzhiyun }
3091*4882a593Smuzhiyun ret = 0;
3092*4882a593Smuzhiyun /*
3093*4882a593Smuzhiyun * no limits exceeded, now record the reservation into all qgroups
3094*4882a593Smuzhiyun */
3095*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
3096*4882a593Smuzhiyun while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3097*4882a593Smuzhiyun struct btrfs_qgroup *qg;
3098*4882a593Smuzhiyun
3099*4882a593Smuzhiyun qg = unode_aux_to_qgroup(unode);
3100*4882a593Smuzhiyun
3101*4882a593Smuzhiyun qgroup_rsv_add(fs_info, qg, num_bytes, type);
3102*4882a593Smuzhiyun }
3103*4882a593Smuzhiyun
3104*4882a593Smuzhiyun out:
3105*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
3106*4882a593Smuzhiyun return ret;
3107*4882a593Smuzhiyun }
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun /*
3110*4882a593Smuzhiyun * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0
3111*4882a593Smuzhiyun * qgroup).
3112*4882a593Smuzhiyun *
3113*4882a593Smuzhiyun * Will handle all higher level qgroups too.
3114*4882a593Smuzhiyun *
3115*4882a593Smuzhiyun * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3116*4882a593Smuzhiyun * This special case is only used for META_PERTRANS type.
3117*4882a593Smuzhiyun */
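/*
 * Example call (illustrative): releasing every remaining pertrans
 * metadata reservation of subvolume 256 at commit time could look like
 *
 *	btrfs_qgroup_free_refroot(fs_info, 256, (u64)-1,
 *				  BTRFS_QGROUP_RSV_META_PERTRANS);
 */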
3118*4882a593Smuzhiyun void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3119*4882a593Smuzhiyun u64 ref_root, u64 num_bytes,
3120*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type)
3121*4882a593Smuzhiyun {
3122*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
3123*4882a593Smuzhiyun struct ulist_node *unode;
3124*4882a593Smuzhiyun struct ulist_iterator uiter;
3125*4882a593Smuzhiyun int ret = 0;
3126*4882a593Smuzhiyun
3127*4882a593Smuzhiyun if (!is_fstree(ref_root))
3128*4882a593Smuzhiyun return;
3129*4882a593Smuzhiyun
3130*4882a593Smuzhiyun if (num_bytes == 0)
3131*4882a593Smuzhiyun return;
3132*4882a593Smuzhiyun
3133*4882a593Smuzhiyun if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3134*4882a593Smuzhiyun WARN(1, "%s: Invalid type to free", __func__);
3135*4882a593Smuzhiyun return;
3136*4882a593Smuzhiyun }
3137*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
3138*4882a593Smuzhiyun
3139*4882a593Smuzhiyun if (!fs_info->quota_root)
3140*4882a593Smuzhiyun goto out;
3141*4882a593Smuzhiyun
3142*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, ref_root);
3143*4882a593Smuzhiyun if (!qgroup)
3144*4882a593Smuzhiyun goto out;
3145*4882a593Smuzhiyun
3146*4882a593Smuzhiyun if (num_bytes == (u64)-1)
3147*4882a593Smuzhiyun /*
3148*4882a593Smuzhiyun * We're freeing all pertrans rsv, get reserved value from
3149*4882a593Smuzhiyun * level 0 qgroup as real num_bytes to free.
3150*4882a593Smuzhiyun */
3151*4882a593Smuzhiyun num_bytes = qgroup->rsv.values[type];
3152*4882a593Smuzhiyun
3153*4882a593Smuzhiyun ulist_reinit(fs_info->qgroup_ulist);
3154*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3155*4882a593Smuzhiyun qgroup_to_aux(qgroup), GFP_ATOMIC);
3156*4882a593Smuzhiyun if (ret < 0)
3157*4882a593Smuzhiyun goto out;
3158*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
3159*4882a593Smuzhiyun while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3160*4882a593Smuzhiyun struct btrfs_qgroup *qg;
3161*4882a593Smuzhiyun struct btrfs_qgroup_list *glist;
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun qg = unode_aux_to_qgroup(unode);
3164*4882a593Smuzhiyun
3165*4882a593Smuzhiyun qgroup_rsv_release(fs_info, qg, num_bytes, type);
3166*4882a593Smuzhiyun
3167*4882a593Smuzhiyun list_for_each_entry(glist, &qg->groups, next_group) {
3168*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist,
3169*4882a593Smuzhiyun glist->group->qgroupid,
3170*4882a593Smuzhiyun qgroup_to_aux(glist->group), GFP_ATOMIC);
3171*4882a593Smuzhiyun if (ret < 0)
3172*4882a593Smuzhiyun goto out;
3173*4882a593Smuzhiyun }
3174*4882a593Smuzhiyun }
3175*4882a593Smuzhiyun
3176*4882a593Smuzhiyun out:
3177*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun /*
3181*4882a593Smuzhiyun * Check if the leaf is the last leaf, which means all node pointers
3182*4882a593Smuzhiyun * are at their last position.
3183*4882a593Smuzhiyun */
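/*
 * E.g. (illustrative) for a tree whose root is at level 2: the current
 * leaf is the last one only when slots[1] == nritems(level 1 node) - 1
 * and slots[2] == nritems(root) - 1.
 */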
3184*4882a593Smuzhiyun static bool is_last_leaf(struct btrfs_path *path)
3185*4882a593Smuzhiyun {
3186*4882a593Smuzhiyun int i;
3187*4882a593Smuzhiyun
3188*4882a593Smuzhiyun for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3189*4882a593Smuzhiyun if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3190*4882a593Smuzhiyun return false;
3191*4882a593Smuzhiyun }
3192*4882a593Smuzhiyun return true;
3193*4882a593Smuzhiyun }
3194*4882a593Smuzhiyun
3195*4882a593Smuzhiyun /*
3196*4882a593Smuzhiyun * Returns < 0 on error, 0 when more leaves are to be scanned.
3197*4882a593Smuzhiyun * Returns 1 when done.
3198*4882a593Smuzhiyun */
3199*4882a593Smuzhiyun static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3200*4882a593Smuzhiyun struct btrfs_path *path)
3201*4882a593Smuzhiyun {
3202*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = trans->fs_info;
3203*4882a593Smuzhiyun struct btrfs_key found;
3204*4882a593Smuzhiyun struct extent_buffer *scratch_leaf = NULL;
3205*4882a593Smuzhiyun struct ulist *roots = NULL;
3206*4882a593Smuzhiyun u64 num_bytes;
3207*4882a593Smuzhiyun bool done;
3208*4882a593Smuzhiyun int slot;
3209*4882a593Smuzhiyun int ret;
3210*4882a593Smuzhiyun
3211*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3212*4882a593Smuzhiyun ret = btrfs_search_slot_for_read(fs_info->extent_root,
3213*4882a593Smuzhiyun &fs_info->qgroup_rescan_progress,
3214*4882a593Smuzhiyun path, 1, 0);
3215*4882a593Smuzhiyun
3216*4882a593Smuzhiyun btrfs_debug(fs_info,
3217*4882a593Smuzhiyun "current progress key (%llu %u %llu), search_slot ret %d",
3218*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.objectid,
3219*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.type,
3220*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.offset, ret);
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun if (ret) {
3223*4882a593Smuzhiyun /*
3224*4882a593Smuzhiyun * The rescan is about to end, we will not be scanning any
3225*4882a593Smuzhiyun * further blocks. We cannot unset the RESCAN flag here, because
3226*4882a593Smuzhiyun * we want to commit the transaction if everything went well.
3227*4882a593Smuzhiyun * To make the live accounting work in this phase, we set our
3228*4882a593Smuzhiyun * scan progress pointer such that every real extent objectid
3229*4882a593Smuzhiyun * will be smaller.
3230*4882a593Smuzhiyun */
3231*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3232*4882a593Smuzhiyun btrfs_release_path(path);
3233*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3234*4882a593Smuzhiyun return ret;
3235*4882a593Smuzhiyun }
3236*4882a593Smuzhiyun done = is_last_leaf(path);
3237*4882a593Smuzhiyun
3238*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &found,
3239*4882a593Smuzhiyun btrfs_header_nritems(path->nodes[0]) - 1);
3240*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3241*4882a593Smuzhiyun
3242*4882a593Smuzhiyun scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3243*4882a593Smuzhiyun if (!scratch_leaf) {
3244*4882a593Smuzhiyun ret = -ENOMEM;
3245*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3246*4882a593Smuzhiyun goto out;
3247*4882a593Smuzhiyun }
3248*4882a593Smuzhiyun slot = path->slots[0];
3249*4882a593Smuzhiyun btrfs_release_path(path);
3250*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3251*4882a593Smuzhiyun
3252*4882a593Smuzhiyun for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3253*4882a593Smuzhiyun btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3254*4882a593Smuzhiyun if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3255*4882a593Smuzhiyun found.type != BTRFS_METADATA_ITEM_KEY)
3256*4882a593Smuzhiyun continue;
3257*4882a593Smuzhiyun if (found.type == BTRFS_METADATA_ITEM_KEY)
3258*4882a593Smuzhiyun num_bytes = fs_info->nodesize;
3259*4882a593Smuzhiyun else
3260*4882a593Smuzhiyun num_bytes = found.offset;
3261*4882a593Smuzhiyun
3262*4882a593Smuzhiyun ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3263*4882a593Smuzhiyun &roots, false);
3264*4882a593Smuzhiyun if (ret < 0)
3265*4882a593Smuzhiyun goto out;
3266*4882a593Smuzhiyun /* For rescan, just pass old_roots as NULL */
3267*4882a593Smuzhiyun ret = btrfs_qgroup_account_extent(trans, found.objectid,
3268*4882a593Smuzhiyun num_bytes, NULL, roots);
3269*4882a593Smuzhiyun if (ret < 0)
3270*4882a593Smuzhiyun goto out;
3271*4882a593Smuzhiyun }
3272*4882a593Smuzhiyun out:
3273*4882a593Smuzhiyun if (scratch_leaf)
3274*4882a593Smuzhiyun free_extent_buffer(scratch_leaf);
3275*4882a593Smuzhiyun
3276*4882a593Smuzhiyun if (done && !ret) {
3277*4882a593Smuzhiyun ret = 1;
3278*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3279*4882a593Smuzhiyun }
3280*4882a593Smuzhiyun return ret;
3281*4882a593Smuzhiyun }
3282*4882a593Smuzhiyun
3283*4882a593Smuzhiyun static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3284*4882a593Smuzhiyun {
3285*4882a593Smuzhiyun return btrfs_fs_closing(fs_info) ||
3286*4882a593Smuzhiyun test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
3287*4882a593Smuzhiyun !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
3288*4882a593Smuzhiyun }
3289*4882a593Smuzhiyun
3290*4882a593Smuzhiyun static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3291*4882a593Smuzhiyun {
3292*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3293*4882a593Smuzhiyun qgroup_rescan_work);
3294*4882a593Smuzhiyun struct btrfs_path *path;
3295*4882a593Smuzhiyun struct btrfs_trans_handle *trans = NULL;
3296*4882a593Smuzhiyun int err = -ENOMEM;
3297*4882a593Smuzhiyun int ret = 0;
3298*4882a593Smuzhiyun bool stopped = false;
3299*4882a593Smuzhiyun
3300*4882a593Smuzhiyun path = btrfs_alloc_path();
3301*4882a593Smuzhiyun if (!path)
3302*4882a593Smuzhiyun goto out;
3303*4882a593Smuzhiyun /*
3304*4882a593Smuzhiyun * Rescan should only search the commit root, and any later difference
3305*4882a593Smuzhiyun * should be recorded by qgroup
3306*4882a593Smuzhiyun */
3307*4882a593Smuzhiyun path->search_commit_root = 1;
3308*4882a593Smuzhiyun path->skip_locking = 1;
3309*4882a593Smuzhiyun
3310*4882a593Smuzhiyun err = 0;
3311*4882a593Smuzhiyun while (!err && !(stopped = rescan_should_stop(fs_info))) {
3312*4882a593Smuzhiyun trans = btrfs_start_transaction(fs_info->fs_root, 0);
3313*4882a593Smuzhiyun if (IS_ERR(trans)) {
3314*4882a593Smuzhiyun err = PTR_ERR(trans);
3315*4882a593Smuzhiyun break;
3316*4882a593Smuzhiyun }
3317*4882a593Smuzhiyun
3318*4882a593Smuzhiyun err = qgroup_rescan_leaf(trans, path);
3319*4882a593Smuzhiyun
3320*4882a593Smuzhiyun if (err > 0)
3321*4882a593Smuzhiyun btrfs_commit_transaction(trans);
3322*4882a593Smuzhiyun else
3323*4882a593Smuzhiyun btrfs_end_transaction(trans);
3324*4882a593Smuzhiyun }
3325*4882a593Smuzhiyun
3326*4882a593Smuzhiyun out:
3327*4882a593Smuzhiyun btrfs_free_path(path);
3328*4882a593Smuzhiyun
3329*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3330*4882a593Smuzhiyun if (err > 0 &&
3331*4882a593Smuzhiyun fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3332*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3333*4882a593Smuzhiyun } else if (err < 0 || stopped) {
3334*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3335*4882a593Smuzhiyun }
3336*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3337*4882a593Smuzhiyun
3338*4882a593Smuzhiyun /*
3339*4882a593Smuzhiyun * only update status, since the previous part has already updated the
3340*4882a593Smuzhiyun * qgroup info.
3341*4882a593Smuzhiyun */
3342*4882a593Smuzhiyun trans = btrfs_start_transaction(fs_info->quota_root, 1);
3343*4882a593Smuzhiyun if (IS_ERR(trans)) {
3344*4882a593Smuzhiyun err = PTR_ERR(trans);
3345*4882a593Smuzhiyun trans = NULL;
3346*4882a593Smuzhiyun btrfs_err(fs_info,
3347*4882a593Smuzhiyun "fail to start transaction for status update: %d",
3348*4882a593Smuzhiyun err);
3349*4882a593Smuzhiyun }
3350*4882a593Smuzhiyun
3351*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3352*4882a593Smuzhiyun if (!stopped)
3353*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3354*4882a593Smuzhiyun if (trans) {
3355*4882a593Smuzhiyun ret = update_qgroup_status_item(trans);
3356*4882a593Smuzhiyun if (ret < 0) {
3357*4882a593Smuzhiyun err = ret;
3358*4882a593Smuzhiyun btrfs_err(fs_info, "fail to update qgroup status: %d",
3359*4882a593Smuzhiyun err);
3360*4882a593Smuzhiyun }
3361*4882a593Smuzhiyun }
3362*4882a593Smuzhiyun fs_info->qgroup_rescan_running = false;
3363*4882a593Smuzhiyun complete_all(&fs_info->qgroup_rescan_completion);
3364*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3365*4882a593Smuzhiyun
3366*4882a593Smuzhiyun if (!trans)
3367*4882a593Smuzhiyun return;
3368*4882a593Smuzhiyun
3369*4882a593Smuzhiyun btrfs_end_transaction(trans);
3370*4882a593Smuzhiyun
3371*4882a593Smuzhiyun if (stopped) {
3372*4882a593Smuzhiyun btrfs_info(fs_info, "qgroup scan paused");
3373*4882a593Smuzhiyun } else if (err >= 0) {
3374*4882a593Smuzhiyun btrfs_info(fs_info, "qgroup scan completed%s",
3375*4882a593Smuzhiyun err > 0 ? " (inconsistency flag cleared)" : "");
3376*4882a593Smuzhiyun } else {
3377*4882a593Smuzhiyun btrfs_err(fs_info, "qgroup scan failed with %d", err);
3378*4882a593Smuzhiyun }
3379*4882a593Smuzhiyun }
3380*4882a593Smuzhiyun
3381*4882a593Smuzhiyun /*
3382*4882a593Smuzhiyun * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3383*4882a593Smuzhiyun * memory required for the rescan context.
3384*4882a593Smuzhiyun */
3385*4882a593Smuzhiyun static int
3386*4882a593Smuzhiyun qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3387*4882a593Smuzhiyun int init_flags)
3388*4882a593Smuzhiyun {
3389*4882a593Smuzhiyun int ret = 0;
3390*4882a593Smuzhiyun
3391*4882a593Smuzhiyun if (!init_flags) {
3392*4882a593Smuzhiyun /* we're resuming qgroup rescan at mount time */
3393*4882a593Smuzhiyun if (!(fs_info->qgroup_flags &
3394*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3395*4882a593Smuzhiyun btrfs_warn(fs_info,
3396*4882a593Smuzhiyun "qgroup rescan init failed, qgroup rescan is not queued");
3397*4882a593Smuzhiyun ret = -EINVAL;
3398*4882a593Smuzhiyun } else if (!(fs_info->qgroup_flags &
3399*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_ON)) {
3400*4882a593Smuzhiyun btrfs_warn(fs_info,
3401*4882a593Smuzhiyun "qgroup rescan init failed, qgroup is not enabled");
3402*4882a593Smuzhiyun ret = -EINVAL;
3403*4882a593Smuzhiyun }
3404*4882a593Smuzhiyun
3405*4882a593Smuzhiyun if (ret)
3406*4882a593Smuzhiyun return ret;
3407*4882a593Smuzhiyun }
3408*4882a593Smuzhiyun
3409*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3410*4882a593Smuzhiyun
3411*4882a593Smuzhiyun if (init_flags) {
3412*4882a593Smuzhiyun if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3413*4882a593Smuzhiyun btrfs_warn(fs_info,
3414*4882a593Smuzhiyun "qgroup rescan is already in progress");
3415*4882a593Smuzhiyun ret = -EINPROGRESS;
3416*4882a593Smuzhiyun } else if (!(fs_info->qgroup_flags &
3417*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_ON)) {
3418*4882a593Smuzhiyun btrfs_warn(fs_info,
3419*4882a593Smuzhiyun "qgroup rescan init failed, qgroup is not enabled");
3420*4882a593Smuzhiyun ret = -EINVAL;
3421*4882a593Smuzhiyun } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3422*4882a593Smuzhiyun /* Quota disable is in progress */
3423*4882a593Smuzhiyun ret = -EBUSY;
3424*4882a593Smuzhiyun }
3425*4882a593Smuzhiyun
3426*4882a593Smuzhiyun if (ret) {
3427*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3428*4882a593Smuzhiyun return ret;
3429*4882a593Smuzhiyun }
3430*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3431*4882a593Smuzhiyun }
3432*4882a593Smuzhiyun
3433*4882a593Smuzhiyun memset(&fs_info->qgroup_rescan_progress, 0,
3434*4882a593Smuzhiyun sizeof(fs_info->qgroup_rescan_progress));
3435*4882a593Smuzhiyun fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3436*4882a593Smuzhiyun init_completion(&fs_info->qgroup_rescan_completion);
3437*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3438*4882a593Smuzhiyun
3439*4882a593Smuzhiyun btrfs_init_work(&fs_info->qgroup_rescan_work,
3440*4882a593Smuzhiyun btrfs_qgroup_rescan_worker, NULL, NULL);
3441*4882a593Smuzhiyun return 0;
3442*4882a593Smuzhiyun }
3443*4882a593Smuzhiyun
3444*4882a593Smuzhiyun static void
3445*4882a593Smuzhiyun qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3446*4882a593Smuzhiyun {
3447*4882a593Smuzhiyun struct rb_node *n;
3448*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
3449*4882a593Smuzhiyun
3450*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
3451*4882a593Smuzhiyun /* clear all current qgroup tracking information */
3452*4882a593Smuzhiyun for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3453*4882a593Smuzhiyun qgroup = rb_entry(n, struct btrfs_qgroup, node);
3454*4882a593Smuzhiyun qgroup->rfer = 0;
3455*4882a593Smuzhiyun qgroup->rfer_cmpr = 0;
3456*4882a593Smuzhiyun qgroup->excl = 0;
3457*4882a593Smuzhiyun qgroup->excl_cmpr = 0;
3458*4882a593Smuzhiyun qgroup_dirty(fs_info, qgroup);
3459*4882a593Smuzhiyun }
3460*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
3461*4882a593Smuzhiyun }
3462*4882a593Smuzhiyun
3463*4882a593Smuzhiyun int
3464*4882a593Smuzhiyun btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3465*4882a593Smuzhiyun {
3466*4882a593Smuzhiyun int ret = 0;
3467*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3468*4882a593Smuzhiyun
3469*4882a593Smuzhiyun ret = qgroup_rescan_init(fs_info, 0, 1);
3470*4882a593Smuzhiyun if (ret)
3471*4882a593Smuzhiyun return ret;
3472*4882a593Smuzhiyun
3473*4882a593Smuzhiyun /*
3474*4882a593Smuzhiyun * We have set the rescan_progress to 0, which means no more
3475*4882a593Smuzhiyun * delayed refs will be accounted by btrfs_qgroup_account_ref.
3476*4882a593Smuzhiyun * However, btrfs_qgroup_account_ref may be running right after its call
3477*4882a593Smuzhiyun * to btrfs_find_all_roots, in which case it would still do the
3478*4882a593Smuzhiyun * accounting.
3479*4882a593Smuzhiyun * To solve this, we're committing the transaction, which will
3480*4882a593Smuzhiyun * ensure we run all delayed refs and only after that, we are
3481*4882a593Smuzhiyun * going to clear all tracking information for a clean start.
3482*4882a593Smuzhiyun */
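/*
 * Rough timeline of the race (illustrative): another task finishes
 * btrfs_find_all_roots(), this rescan resets the progress, then that
 * task still accounts its extent with the roots it already found;
 * committing here flushes such in-flight work before we zero the
 * counters.
 */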
3483*4882a593Smuzhiyun
3484*4882a593Smuzhiyun trans = btrfs_join_transaction(fs_info->fs_root);
3485*4882a593Smuzhiyun if (IS_ERR(trans)) {
3486*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3487*4882a593Smuzhiyun return PTR_ERR(trans);
3488*4882a593Smuzhiyun }
3489*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
3490*4882a593Smuzhiyun if (ret) {
3491*4882a593Smuzhiyun fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3492*4882a593Smuzhiyun return ret;
3493*4882a593Smuzhiyun }
3494*4882a593Smuzhiyun
3495*4882a593Smuzhiyun qgroup_rescan_zero_tracking(fs_info);
3496*4882a593Smuzhiyun
3497*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3498*4882a593Smuzhiyun fs_info->qgroup_rescan_running = true;
3499*4882a593Smuzhiyun btrfs_queue_work(fs_info->qgroup_rescan_workers,
3500*4882a593Smuzhiyun &fs_info->qgroup_rescan_work);
3501*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3502*4882a593Smuzhiyun
3503*4882a593Smuzhiyun return 0;
3504*4882a593Smuzhiyun }
3505*4882a593Smuzhiyun
3506*4882a593Smuzhiyun int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3507*4882a593Smuzhiyun bool interruptible)
3508*4882a593Smuzhiyun {
3509*4882a593Smuzhiyun int running;
3510*4882a593Smuzhiyun int ret = 0;
3511*4882a593Smuzhiyun
3512*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3513*4882a593Smuzhiyun running = fs_info->qgroup_rescan_running;
3514*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3515*4882a593Smuzhiyun
3516*4882a593Smuzhiyun if (!running)
3517*4882a593Smuzhiyun return 0;
3518*4882a593Smuzhiyun
3519*4882a593Smuzhiyun if (interruptible)
3520*4882a593Smuzhiyun ret = wait_for_completion_interruptible(
3521*4882a593Smuzhiyun &fs_info->qgroup_rescan_completion);
3522*4882a593Smuzhiyun else
3523*4882a593Smuzhiyun wait_for_completion(&fs_info->qgroup_rescan_completion);
3524*4882a593Smuzhiyun
3525*4882a593Smuzhiyun return ret;
3526*4882a593Smuzhiyun }
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun /*
3529*4882a593Smuzhiyun * This is only called from open_ctree() where we're still single
3530*4882a593Smuzhiyun * threaded, thus locking is omitted here.
3531*4882a593Smuzhiyun */
3532*4882a593Smuzhiyun void
3533*4882a593Smuzhiyun btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3534*4882a593Smuzhiyun {
3535*4882a593Smuzhiyun if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3536*4882a593Smuzhiyun mutex_lock(&fs_info->qgroup_rescan_lock);
3537*4882a593Smuzhiyun fs_info->qgroup_rescan_running = true;
3538*4882a593Smuzhiyun btrfs_queue_work(fs_info->qgroup_rescan_workers,
3539*4882a593Smuzhiyun &fs_info->qgroup_rescan_work);
3540*4882a593Smuzhiyun mutex_unlock(&fs_info->qgroup_rescan_lock);
3541*4882a593Smuzhiyun }
3542*4882a593Smuzhiyun }
3543*4882a593Smuzhiyun
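/*
 * Iterate an rbtree from @start in a way that is safe against removal of the
 * current node: @next is sampled before the loop body runs, so the body may
 * erase @node from the tree without breaking the iteration.
 */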
3544*4882a593Smuzhiyun #define rbtree_iterate_from_safe(node, next, start) \
3545*4882a593Smuzhiyun for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
3546*4882a593Smuzhiyun
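/*
 * Clear the EXTENT_QGROUP_RESERVED bit of every range recorded in @reserved
 * that overlaps [@start, @start + @len), remove those ranges from the
 * changeset and decrease its bytes_changed accordingly. Used to revert a
 * partially successful reservation when qgroup_reserve() fails.
 */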
3547*4882a593Smuzhiyun static int qgroup_unreserve_range(struct btrfs_inode *inode,
3548*4882a593Smuzhiyun struct extent_changeset *reserved, u64 start,
3549*4882a593Smuzhiyun u64 len)
3550*4882a593Smuzhiyun {
3551*4882a593Smuzhiyun struct rb_node *node;
3552*4882a593Smuzhiyun struct rb_node *next;
3553*4882a593Smuzhiyun struct ulist_node *entry;
3554*4882a593Smuzhiyun int ret = 0;
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun node = reserved->range_changed.root.rb_node;
3557*4882a593Smuzhiyun if (!node)
3558*4882a593Smuzhiyun return 0;
3559*4882a593Smuzhiyun while (node) {
3560*4882a593Smuzhiyun entry = rb_entry(node, struct ulist_node, rb_node);
3561*4882a593Smuzhiyun if (entry->val < start)
3562*4882a593Smuzhiyun node = node->rb_right;
3563*4882a593Smuzhiyun else
3564*4882a593Smuzhiyun node = node->rb_left;
3565*4882a593Smuzhiyun }
3566*4882a593Smuzhiyun
3567*4882a593Smuzhiyun if (entry->val > start && rb_prev(&entry->rb_node))
3568*4882a593Smuzhiyun entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
3569*4882a593Smuzhiyun rb_node);
3570*4882a593Smuzhiyun
3571*4882a593Smuzhiyun rbtree_iterate_from_safe(node, next, &entry->rb_node) {
3572*4882a593Smuzhiyun u64 entry_start;
3573*4882a593Smuzhiyun u64 entry_end;
3574*4882a593Smuzhiyun u64 entry_len;
3575*4882a593Smuzhiyun int clear_ret;
3576*4882a593Smuzhiyun
3577*4882a593Smuzhiyun entry = rb_entry(node, struct ulist_node, rb_node);
3578*4882a593Smuzhiyun entry_start = entry->val;
3579*4882a593Smuzhiyun entry_end = entry->aux;
3580*4882a593Smuzhiyun entry_len = entry_end - entry_start + 1;
3581*4882a593Smuzhiyun
3582*4882a593Smuzhiyun if (entry_start >= start + len)
3583*4882a593Smuzhiyun break;
3584*4882a593Smuzhiyun if (entry_start + entry_len <= start)
3585*4882a593Smuzhiyun continue;
3586*4882a593Smuzhiyun /*
3587*4882a593Smuzhiyun * Now the entry overlaps [start, start + len), clear the
3588*4882a593Smuzhiyun * EXTENT_QGROUP_RESERVED bit for it.
3589*4882a593Smuzhiyun */
3590*4882a593Smuzhiyun clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3591*4882a593Smuzhiyun entry_end, EXTENT_QGROUP_RESERVED);
3592*4882a593Smuzhiyun if (!ret && clear_ret < 0)
3593*4882a593Smuzhiyun ret = clear_ret;
3594*4882a593Smuzhiyun
3595*4882a593Smuzhiyun ulist_del(&reserved->range_changed, entry->val, entry->aux);
3596*4882a593Smuzhiyun if (likely(reserved->bytes_changed >= entry_len)) {
3597*4882a593Smuzhiyun reserved->bytes_changed -= entry_len;
3598*4882a593Smuzhiyun } else {
3599*4882a593Smuzhiyun WARN_ON(1);
3600*4882a593Smuzhiyun reserved->bytes_changed = 0;
3601*4882a593Smuzhiyun }
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun return ret;
3605*4882a593Smuzhiyun }
3606*4882a593Smuzhiyun
3607*4882a593Smuzhiyun /*
3608*4882a593Smuzhiyun * Try to free some space for qgroup.
3609*4882a593Smuzhiyun *
3610*4882a593Smuzhiyun * For qgroup, there are only 3 ways to free qgroup space:
3611*4882a593Smuzhiyun * - Flush nodatacow write
3612*4882a593Smuzhiyun * Any nodatacow write will free its reserved data space at run_delalloc_range().
3613*4882a593Smuzhiyun * In theory, we should only flush nodatacow inodes, but it's not yet
3614*4882a593Smuzhiyun * possible, so we need to flush the whole root.
3615*4882a593Smuzhiyun *
3616*4882a593Smuzhiyun * - Wait for ordered extents
3617*4882a593Smuzhiyun * When ordered extents are finished, their reserved metadata is finally
3618*4882a593Smuzhiyun * converted to per_trans status, which can be freed by a later
3619*4882a593Smuzhiyun * transaction commit.
3620*4882a593Smuzhiyun *
3621*4882a593Smuzhiyun * - Commit transaction
3622*4882a593Smuzhiyun * This would free the meta_per_trans space.
3623*4882a593Smuzhiyun * In theory this shouldn't provide much space, but every extra bit of
3624*4882a593Smuzhiyun * freed qgroup space helps.
3625*4882a593Smuzhiyun */
3626*4882a593Smuzhiyun static int try_flush_qgroup(struct btrfs_root *root)
3627*4882a593Smuzhiyun {
3628*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
3629*4882a593Smuzhiyun int ret;
3630*4882a593Smuzhiyun bool can_commit = true;
3631*4882a593Smuzhiyun
3632*4882a593Smuzhiyun /*
3633*4882a593Smuzhiyun * If the current process holds a transaction, we shouldn't flush, as we
3634*4882a593Smuzhiyun * assume all space reservation happens before a transaction handle is
3635*4882a593Smuzhiyun * held.
3636*4882a593Smuzhiyun *
3637*4882a593Smuzhiyun * But there are cases like btrfs_delayed_item_reserve_metadata() where
3638*4882a593Smuzhiyun * we try to reserve space with one transaction handle already held.
3639*4882a593Smuzhiyun * In that case we can't commit the transaction, but at least try to end
3640*4882a593Smuzhiyun * it and hope the started data writes can free some space.
3641*4882a593Smuzhiyun */
3642*4882a593Smuzhiyun if (current->journal_info &&
3643*4882a593Smuzhiyun current->journal_info != BTRFS_SEND_TRANS_STUB)
3644*4882a593Smuzhiyun can_commit = false;
3645*4882a593Smuzhiyun
3646*4882a593Smuzhiyun /*
3647*4882a593Smuzhiyun * We don't want to run flush again and again, so if there is a running
3648*4882a593Smuzhiyun * one, we won't try to start a new flush, but exit directly.
3649*4882a593Smuzhiyun */
3650*4882a593Smuzhiyun if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
3651*4882a593Smuzhiyun /*
3652*4882a593Smuzhiyun * We are already holding a transaction, thus we can block other
3653*4882a593Smuzhiyun * threads from flushing. So exit right now. This increases
3654*4882a593Smuzhiyun * the chance of EDQUOT for heavy load and near limit cases.
3655*4882a593Smuzhiyun * But we can argue that if we're already near limit, EDQUOT is
3656*4882a593Smuzhiyun * unavoidable anyway.
3657*4882a593Smuzhiyun */
3658*4882a593Smuzhiyun if (!can_commit)
3659*4882a593Smuzhiyun return 0;
3660*4882a593Smuzhiyun
3661*4882a593Smuzhiyun wait_event(root->qgroup_flush_wait,
3662*4882a593Smuzhiyun !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3663*4882a593Smuzhiyun return 0;
3664*4882a593Smuzhiyun }
3665*4882a593Smuzhiyun
3666*4882a593Smuzhiyun ret = btrfs_start_delalloc_snapshot(root);
3667*4882a593Smuzhiyun if (ret < 0)
3668*4882a593Smuzhiyun goto out;
3669*4882a593Smuzhiyun btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3670*4882a593Smuzhiyun
3671*4882a593Smuzhiyun trans = btrfs_join_transaction(root);
3672*4882a593Smuzhiyun if (IS_ERR(trans)) {
3673*4882a593Smuzhiyun ret = PTR_ERR(trans);
3674*4882a593Smuzhiyun goto out;
3675*4882a593Smuzhiyun }
3676*4882a593Smuzhiyun
3677*4882a593Smuzhiyun if (can_commit)
3678*4882a593Smuzhiyun ret = btrfs_commit_transaction(trans);
3679*4882a593Smuzhiyun else
3680*4882a593Smuzhiyun ret = btrfs_end_transaction(trans);
3681*4882a593Smuzhiyun out:
3682*4882a593Smuzhiyun clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3683*4882a593Smuzhiyun wake_up(&root->qgroup_flush_wait);
3684*4882a593Smuzhiyun return ret;
3685*4882a593Smuzhiyun }
3686*4882a593Smuzhiyun
3687*4882a593Smuzhiyun static int qgroup_reserve_data(struct btrfs_inode *inode,
3688*4882a593Smuzhiyun struct extent_changeset **reserved_ret, u64 start,
3689*4882a593Smuzhiyun u64 len)
3690*4882a593Smuzhiyun {
3691*4882a593Smuzhiyun struct btrfs_root *root = inode->root;
3692*4882a593Smuzhiyun struct extent_changeset *reserved;
3693*4882a593Smuzhiyun bool new_reserved = false;
3694*4882a593Smuzhiyun u64 orig_reserved;
3695*4882a593Smuzhiyun u64 to_reserve;
3696*4882a593Smuzhiyun int ret;
3697*4882a593Smuzhiyun
3698*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3699*4882a593Smuzhiyun !is_fstree(root->root_key.objectid) || len == 0)
3700*4882a593Smuzhiyun return 0;
3701*4882a593Smuzhiyun
3702*4882a593Smuzhiyun /* @reserved parameter is mandatory for qgroup */
3703*4882a593Smuzhiyun if (WARN_ON(!reserved_ret))
3704*4882a593Smuzhiyun return -EINVAL;
3705*4882a593Smuzhiyun if (!*reserved_ret) {
3706*4882a593Smuzhiyun new_reserved = true;
3707*4882a593Smuzhiyun *reserved_ret = extent_changeset_alloc();
3708*4882a593Smuzhiyun if (!*reserved_ret)
3709*4882a593Smuzhiyun return -ENOMEM;
3710*4882a593Smuzhiyun }
3711*4882a593Smuzhiyun reserved = *reserved_ret;
3712*4882a593Smuzhiyun /* Record already reserved space */
3713*4882a593Smuzhiyun orig_reserved = reserved->bytes_changed;
3714*4882a593Smuzhiyun ret = set_record_extent_bits(&inode->io_tree, start,
3715*4882a593Smuzhiyun start + len -1, EXTENT_QGROUP_RESERVED, reserved);
3716*4882a593Smuzhiyun
3717*4882a593Smuzhiyun /* Newly reserved space */
3718*4882a593Smuzhiyun to_reserve = reserved->bytes_changed - orig_reserved;
3719*4882a593Smuzhiyun trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
3720*4882a593Smuzhiyun to_reserve, QGROUP_RESERVE);
3721*4882a593Smuzhiyun if (ret < 0)
3722*4882a593Smuzhiyun goto out;
3723*4882a593Smuzhiyun ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3724*4882a593Smuzhiyun if (ret < 0)
3725*4882a593Smuzhiyun goto cleanup;
3726*4882a593Smuzhiyun
3727*4882a593Smuzhiyun return ret;
3728*4882a593Smuzhiyun
3729*4882a593Smuzhiyun cleanup:
3730*4882a593Smuzhiyun qgroup_unreserve_range(inode, reserved, start, len);
3731*4882a593Smuzhiyun out:
3732*4882a593Smuzhiyun if (new_reserved) {
3733*4882a593Smuzhiyun extent_changeset_release(reserved);
3734*4882a593Smuzhiyun kfree(reserved);
3735*4882a593Smuzhiyun *reserved_ret = NULL;
3736*4882a593Smuzhiyun }
3737*4882a593Smuzhiyun return ret;
3738*4882a593Smuzhiyun }
3739*4882a593Smuzhiyun
3740*4882a593Smuzhiyun /*
3741*4882a593Smuzhiyun * Reserve qgroup space for range [start, start + len).
3742*4882a593Smuzhiyun *
3743*4882a593Smuzhiyun * This function will either reserve space from related qgroups or do nothing
3744*4882a593Smuzhiyun * if the range is already reserved.
3745*4882a593Smuzhiyun *
3746*4882a593Smuzhiyun * Return 0 for successful reservation
3747*4882a593Smuzhiyun * Return <0 for error (including -EDQUOT)
3748*4882a593Smuzhiyun *
3749*4882a593Smuzhiyun * NOTE: This function may sleep for memory allocation, dirty page flushing and
3750*4882a593Smuzhiyun * transaction commit, so the caller should not hold any locked dirty pages.
3751*4882a593Smuzhiyun */
3752*4882a593Smuzhiyun int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
3753*4882a593Smuzhiyun struct extent_changeset **reserved_ret, u64 start,
3754*4882a593Smuzhiyun u64 len)
3755*4882a593Smuzhiyun {
3756*4882a593Smuzhiyun int ret;
3757*4882a593Smuzhiyun
3758*4882a593Smuzhiyun ret = qgroup_reserve_data(inode, reserved_ret, start, len);
3759*4882a593Smuzhiyun if (ret <= 0 && ret != -EDQUOT)
3760*4882a593Smuzhiyun return ret;
3761*4882a593Smuzhiyun
3762*4882a593Smuzhiyun ret = try_flush_qgroup(inode->root);
3763*4882a593Smuzhiyun if (ret < 0)
3764*4882a593Smuzhiyun return ret;
3765*4882a593Smuzhiyun return qgroup_reserve_data(inode, reserved_ret, start, len);
3766*4882a593Smuzhiyun }
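
/*
 * Illustrative sketch (not part of the original file): a typical data
 * reservation round trip as a write path might perform it. The function name
 * and the @write_failed flag are hypothetical; this only demonstrates how
 * btrfs_qgroup_reserve_data() pairs with btrfs_qgroup_free_data() (data never
 * reached disk) or btrfs_qgroup_release_data() (data written to disk).
 */
static int __maybe_unused qgroup_data_reserve_example(struct btrfs_inode *inode,
						      u64 start, u64 len,
						      bool write_failed)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	/* Reserve qgroup space before dirtying pages in [start, start + len) */
	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
	if (ret < 0)
		return ret;

	if (write_failed)
		/* Data never reached disk: return the reservation to qgroups */
		ret = btrfs_qgroup_free_data(inode, reserved, start, len);
	else
		/* Data is on disk: keep the numbers, only release the io_tree range */
		ret = btrfs_qgroup_release_data(inode, start, len);

	extent_changeset_free(reserved);
	return ret < 0 ? ret : 0;
}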
3767*4882a593Smuzhiyun
3768*4882a593Smuzhiyun /* Free ranges specified by @reserved, normally in error path */
3769*4882a593Smuzhiyun static int qgroup_free_reserved_data(struct btrfs_inode *inode,
3770*4882a593Smuzhiyun struct extent_changeset *reserved, u64 start, u64 len)
3771*4882a593Smuzhiyun {
3772*4882a593Smuzhiyun struct btrfs_root *root = inode->root;
3773*4882a593Smuzhiyun struct ulist_node *unode;
3774*4882a593Smuzhiyun struct ulist_iterator uiter;
3775*4882a593Smuzhiyun struct extent_changeset changeset;
3776*4882a593Smuzhiyun int freed = 0;
3777*4882a593Smuzhiyun int ret;
3778*4882a593Smuzhiyun
3779*4882a593Smuzhiyun extent_changeset_init(&changeset);
3780*4882a593Smuzhiyun len = round_up(start + len, root->fs_info->sectorsize);
3781*4882a593Smuzhiyun start = round_down(start, root->fs_info->sectorsize);
3782*4882a593Smuzhiyun
3783*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
3784*4882a593Smuzhiyun while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3785*4882a593Smuzhiyun u64 range_start = unode->val;
3786*4882a593Smuzhiyun /* unode->aux is the inclusive end */
3787*4882a593Smuzhiyun u64 range_len = unode->aux - range_start + 1;
3788*4882a593Smuzhiyun u64 free_start;
3789*4882a593Smuzhiyun u64 free_len;
3790*4882a593Smuzhiyun
3791*4882a593Smuzhiyun extent_changeset_release(&changeset);
3792*4882a593Smuzhiyun
3793*4882a593Smuzhiyun /* Only free range in range [start, start + len) */
3794*4882a593Smuzhiyun if (range_start >= start + len ||
3795*4882a593Smuzhiyun range_start + range_len <= start)
3796*4882a593Smuzhiyun continue;
3797*4882a593Smuzhiyun free_start = max(range_start, start);
3798*4882a593Smuzhiyun free_len = min(start + len, range_start + range_len) -
3799*4882a593Smuzhiyun free_start;
3800*4882a593Smuzhiyun /*
3801*4882a593Smuzhiyun * TODO: Also modify reserved->ranges_reserved to reflect the
3802*4882a593Smuzhiyun * modification.
3803*4882a593Smuzhiyun *
3804*4882a593Smuzhiyun * However, as long as we free qgroup reserved space according to
3805*4882a593Smuzhiyun * EXTENT_QGROUP_RESERVED, we won't double free.
3806*4882a593Smuzhiyun * So there is no need to rush.
3807*4882a593Smuzhiyun */
3808*4882a593Smuzhiyun ret = clear_record_extent_bits(&inode->io_tree, free_start,
3809*4882a593Smuzhiyun free_start + free_len - 1,
3810*4882a593Smuzhiyun EXTENT_QGROUP_RESERVED, &changeset);
3811*4882a593Smuzhiyun if (ret < 0)
3812*4882a593Smuzhiyun goto out;
3813*4882a593Smuzhiyun freed += changeset.bytes_changed;
3814*4882a593Smuzhiyun }
3815*4882a593Smuzhiyun btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3816*4882a593Smuzhiyun BTRFS_QGROUP_RSV_DATA);
3817*4882a593Smuzhiyun ret = freed;
3818*4882a593Smuzhiyun out:
3819*4882a593Smuzhiyun extent_changeset_release(&changeset);
3820*4882a593Smuzhiyun return ret;
3821*4882a593Smuzhiyun }
3822*4882a593Smuzhiyun
3823*4882a593Smuzhiyun static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
3824*4882a593Smuzhiyun struct extent_changeset *reserved, u64 start, u64 len,
3825*4882a593Smuzhiyun int free)
3826*4882a593Smuzhiyun {
3827*4882a593Smuzhiyun struct extent_changeset changeset;
3828*4882a593Smuzhiyun int trace_op = QGROUP_RELEASE;
3829*4882a593Smuzhiyun int ret;
3830*4882a593Smuzhiyun
3831*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
3832*4882a593Smuzhiyun return 0;
3833*4882a593Smuzhiyun
3834*4882a593Smuzhiyun /* In release case, we shouldn't have @reserved */
3835*4882a593Smuzhiyun WARN_ON(!free && reserved);
3836*4882a593Smuzhiyun if (free && reserved)
3837*4882a593Smuzhiyun return qgroup_free_reserved_data(inode, reserved, start, len);
3838*4882a593Smuzhiyun extent_changeset_init(&changeset);
3839*4882a593Smuzhiyun ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
3840*4882a593Smuzhiyun EXTENT_QGROUP_RESERVED, &changeset);
3841*4882a593Smuzhiyun if (ret < 0)
3842*4882a593Smuzhiyun goto out;
3843*4882a593Smuzhiyun
3844*4882a593Smuzhiyun if (free)
3845*4882a593Smuzhiyun trace_op = QGROUP_FREE;
3846*4882a593Smuzhiyun trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
3847*4882a593Smuzhiyun changeset.bytes_changed, trace_op);
3848*4882a593Smuzhiyun if (free)
3849*4882a593Smuzhiyun btrfs_qgroup_free_refroot(inode->root->fs_info,
3850*4882a593Smuzhiyun inode->root->root_key.objectid,
3851*4882a593Smuzhiyun changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3852*4882a593Smuzhiyun ret = changeset.bytes_changed;
3853*4882a593Smuzhiyun out:
3854*4882a593Smuzhiyun extent_changeset_release(&changeset);
3855*4882a593Smuzhiyun return ret;
3856*4882a593Smuzhiyun }
3857*4882a593Smuzhiyun
3858*4882a593Smuzhiyun /*
3859*4882a593Smuzhiyun * Free a reserved space range from io_tree and related qgroups
3860*4882a593Smuzhiyun *
3861*4882a593Smuzhiyun * Should be called when a range of pages gets invalidated before reaching
3862*4882a593Smuzhiyun * disk, or for the error cleanup case.
3863*4882a593Smuzhiyun * If @reserved is given, only the reserved range in [@start, @start + @len)
3864*4882a593Smuzhiyun * will be freed.
3865*4882a593Smuzhiyun *
3866*4882a593Smuzhiyun * For data written to disk, use btrfs_qgroup_release_data().
3867*4882a593Smuzhiyun *
3868*4882a593Smuzhiyun * NOTE: This function may sleep for memory allocation.
3869*4882a593Smuzhiyun */
3870*4882a593Smuzhiyun int btrfs_qgroup_free_data(struct btrfs_inode *inode,
3871*4882a593Smuzhiyun struct extent_changeset *reserved, u64 start, u64 len)
3872*4882a593Smuzhiyun {
3873*4882a593Smuzhiyun return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3874*4882a593Smuzhiyun }
3875*4882a593Smuzhiyun
3876*4882a593Smuzhiyun /*
3877*4882a593Smuzhiyun * Release a reserved space range from io_tree only.
3878*4882a593Smuzhiyun *
3879*4882a593Smuzhiyun * Should be called when a range of pages gets written to disk and the
3880*4882a593Smuzhiyun * corresponding FILE_EXTENT item is inserted into the corresponding root.
3881*4882a593Smuzhiyun *
3882*4882a593Smuzhiyun * Since the new qgroup accounting framework only updates qgroup numbers at
3883*4882a593Smuzhiyun * commit_transaction() time, the reserved space shouldn't be freed from the
3884*4882a593Smuzhiyun * related qgroups.
3885*4882a593Smuzhiyun *
3886*4882a593Smuzhiyun * But we should release the range from the io_tree, to allow further writes
3887*4882a593Smuzhiyun * to be COWed.
3888*4882a593Smuzhiyun *
3889*4882a593Smuzhiyun * NOTE: This function may sleep for memory allocation.
3890*4882a593Smuzhiyun */
3891*4882a593Smuzhiyun int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
3892*4882a593Smuzhiyun {
3893*4882a593Smuzhiyun return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3894*4882a593Smuzhiyun }
3895*4882a593Smuzhiyun
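/*
 * Track how many META_PREALLOC/META_PERTRANS bytes this root has reserved.
 * The free side uses these per-root counters to clamp the amount it releases,
 * so that a reservation made while quota was disabled cannot make the qgroup
 * counters underflow once quota is enabled again.
 */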
3896*4882a593Smuzhiyun static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3897*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type)
3898*4882a593Smuzhiyun {
3899*4882a593Smuzhiyun if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3900*4882a593Smuzhiyun type != BTRFS_QGROUP_RSV_META_PERTRANS)
3901*4882a593Smuzhiyun return;
3902*4882a593Smuzhiyun if (num_bytes == 0)
3903*4882a593Smuzhiyun return;
3904*4882a593Smuzhiyun
3905*4882a593Smuzhiyun spin_lock(&root->qgroup_meta_rsv_lock);
3906*4882a593Smuzhiyun if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3907*4882a593Smuzhiyun root->qgroup_meta_rsv_prealloc += num_bytes;
3908*4882a593Smuzhiyun else
3909*4882a593Smuzhiyun root->qgroup_meta_rsv_pertrans += num_bytes;
3910*4882a593Smuzhiyun spin_unlock(&root->qgroup_meta_rsv_lock);
3911*4882a593Smuzhiyun }
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3914*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type)
3915*4882a593Smuzhiyun {
3916*4882a593Smuzhiyun if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3917*4882a593Smuzhiyun type != BTRFS_QGROUP_RSV_META_PERTRANS)
3918*4882a593Smuzhiyun return 0;
3919*4882a593Smuzhiyun if (num_bytes == 0)
3920*4882a593Smuzhiyun return 0;
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun spin_lock(&root->qgroup_meta_rsv_lock);
3923*4882a593Smuzhiyun if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3924*4882a593Smuzhiyun num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3925*4882a593Smuzhiyun num_bytes);
3926*4882a593Smuzhiyun root->qgroup_meta_rsv_prealloc -= num_bytes;
3927*4882a593Smuzhiyun } else {
3928*4882a593Smuzhiyun num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3929*4882a593Smuzhiyun num_bytes);
3930*4882a593Smuzhiyun root->qgroup_meta_rsv_pertrans -= num_bytes;
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun spin_unlock(&root->qgroup_meta_rsv_lock);
3933*4882a593Smuzhiyun return num_bytes;
3934*4882a593Smuzhiyun }
3935*4882a593Smuzhiyun
3936*4882a593Smuzhiyun int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3937*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type, bool enforce)
3938*4882a593Smuzhiyun {
3939*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
3940*4882a593Smuzhiyun int ret;
3941*4882a593Smuzhiyun
3942*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3943*4882a593Smuzhiyun !is_fstree(root->root_key.objectid) || num_bytes == 0)
3944*4882a593Smuzhiyun return 0;
3945*4882a593Smuzhiyun
3946*4882a593Smuzhiyun BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
3947*4882a593Smuzhiyun trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
3948*4882a593Smuzhiyun ret = qgroup_reserve(root, num_bytes, enforce, type);
3949*4882a593Smuzhiyun if (ret < 0)
3950*4882a593Smuzhiyun return ret;
3951*4882a593Smuzhiyun /*
3952*4882a593Smuzhiyun * Record what we have reserved in the root, to avoid an underflow across
3953*4882a593Smuzhiyun * a quota disabled -> enabled transition.
3954*4882a593Smuzhiyun *
3955*4882a593Smuzhiyun * In that case, we may try to free space we haven't reserved (since quota
3956*4882a593Smuzhiyun * was disabled), so record what we reserved in the root and ensure a later
3957*4882a593Smuzhiyun * release won't underflow this number.
3958*4882a593Smuzhiyun */
3959*4882a593Smuzhiyun add_root_meta_rsv(root, num_bytes, type);
3960*4882a593Smuzhiyun return ret;
3961*4882a593Smuzhiyun }
3962*4882a593Smuzhiyun
3963*4882a593Smuzhiyun int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3964*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type, bool enforce)
3965*4882a593Smuzhiyun {
3966*4882a593Smuzhiyun int ret;
3967*4882a593Smuzhiyun
3968*4882a593Smuzhiyun ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
3969*4882a593Smuzhiyun if (ret <= 0 && ret != -EDQUOT)
3970*4882a593Smuzhiyun return ret;
3971*4882a593Smuzhiyun
3972*4882a593Smuzhiyun ret = try_flush_qgroup(root);
3973*4882a593Smuzhiyun if (ret < 0)
3974*4882a593Smuzhiyun return ret;
3975*4882a593Smuzhiyun return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
3976*4882a593Smuzhiyun }
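
/*
 * Illustrative sketch (not part of the original file): the life cycle of a
 * metadata reservation. The function name, the @used flag and the assumption
 * that @num_bytes is nodesize aligned are hypothetical; this only shows how a
 * PREALLOC reservation is either converted to PERTRANS or given back.
 */
static void __maybe_unused qgroup_meta_rsv_example(struct btrfs_root *root,
						   int num_bytes, bool used)
{
	/* Reserve PREALLOC space up front, enforcing the qgroup limits */
	if (__btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true) < 0)
		return;

	if (used)
		/* The metadata was really used: move PREALLOC -> PERTRANS */
		btrfs_qgroup_convert_reserved_meta(root, num_bytes);
	else
		/* Nothing was modified: give the PREALLOC reservation back */
		__btrfs_qgroup_free_meta(root, num_bytes,
					 BTRFS_QGROUP_RSV_META_PREALLOC);
}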
3977*4882a593Smuzhiyun
3978*4882a593Smuzhiyun void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
3979*4882a593Smuzhiyun {
3980*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
3981*4882a593Smuzhiyun
3982*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3983*4882a593Smuzhiyun !is_fstree(root->root_key.objectid))
3984*4882a593Smuzhiyun return;
3985*4882a593Smuzhiyun
3986*4882a593Smuzhiyun /* TODO: Update trace point to handle such free */
3987*4882a593Smuzhiyun trace_qgroup_meta_free_all_pertrans(root);
3988*4882a593Smuzhiyun /* Special value -1 means to free all reserved space */
3989*4882a593Smuzhiyun btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
3990*4882a593Smuzhiyun BTRFS_QGROUP_RSV_META_PERTRANS);
3991*4882a593Smuzhiyun }
3992*4882a593Smuzhiyun
3993*4882a593Smuzhiyun void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3994*4882a593Smuzhiyun enum btrfs_qgroup_rsv_type type)
3995*4882a593Smuzhiyun {
3996*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
3997*4882a593Smuzhiyun
3998*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3999*4882a593Smuzhiyun !is_fstree(root->root_key.objectid))
4000*4882a593Smuzhiyun return;
4001*4882a593Smuzhiyun
4002*4882a593Smuzhiyun /*
4003*4882a593Smuzhiyun * A reservation for META_PREALLOC can happen before quota is enabled,
4004*4882a593Smuzhiyun * which can lead to underflow.
4005*4882a593Smuzhiyun * Here we ensure we only free what we really have reserved.
4006*4882a593Smuzhiyun */
4007*4882a593Smuzhiyun num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4008*4882a593Smuzhiyun BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4009*4882a593Smuzhiyun trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4010*4882a593Smuzhiyun btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
4011*4882a593Smuzhiyun num_bytes, type);
4012*4882a593Smuzhiyun }
4013*4882a593Smuzhiyun
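/*
 * Walk the qgroup of @ref_root and all of its parent qgroups, releasing
 * @num_bytes from the META_PREALLOC reservation and adding the same amount to
 * META_PERTRANS, so the space is dropped at the next transaction commit.
 */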
4014*4882a593Smuzhiyun static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4015*4882a593Smuzhiyun int num_bytes)
4016*4882a593Smuzhiyun {
4017*4882a593Smuzhiyun struct btrfs_qgroup *qgroup;
4018*4882a593Smuzhiyun struct ulist_node *unode;
4019*4882a593Smuzhiyun struct ulist_iterator uiter;
4020*4882a593Smuzhiyun int ret = 0;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun if (num_bytes == 0)
4023*4882a593Smuzhiyun return;
4024*4882a593Smuzhiyun if (!fs_info->quota_root)
4025*4882a593Smuzhiyun return;
4026*4882a593Smuzhiyun
4027*4882a593Smuzhiyun spin_lock(&fs_info->qgroup_lock);
4028*4882a593Smuzhiyun qgroup = find_qgroup_rb(fs_info, ref_root);
4029*4882a593Smuzhiyun if (!qgroup)
4030*4882a593Smuzhiyun goto out;
4031*4882a593Smuzhiyun ulist_reinit(fs_info->qgroup_ulist);
4032*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
4033*4882a593Smuzhiyun qgroup_to_aux(qgroup), GFP_ATOMIC);
4034*4882a593Smuzhiyun if (ret < 0)
4035*4882a593Smuzhiyun goto out;
4036*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
4037*4882a593Smuzhiyun while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
4038*4882a593Smuzhiyun struct btrfs_qgroup *qg;
4039*4882a593Smuzhiyun struct btrfs_qgroup_list *glist;
4040*4882a593Smuzhiyun
4041*4882a593Smuzhiyun qg = unode_aux_to_qgroup(unode);
4042*4882a593Smuzhiyun
4043*4882a593Smuzhiyun qgroup_rsv_release(fs_info, qg, num_bytes,
4044*4882a593Smuzhiyun BTRFS_QGROUP_RSV_META_PREALLOC);
4045*4882a593Smuzhiyun qgroup_rsv_add(fs_info, qg, num_bytes,
4046*4882a593Smuzhiyun BTRFS_QGROUP_RSV_META_PERTRANS);
4047*4882a593Smuzhiyun list_for_each_entry(glist, &qg->groups, next_group) {
4048*4882a593Smuzhiyun ret = ulist_add(fs_info->qgroup_ulist,
4049*4882a593Smuzhiyun glist->group->qgroupid,
4050*4882a593Smuzhiyun qgroup_to_aux(glist->group), GFP_ATOMIC);
4051*4882a593Smuzhiyun if (ret < 0)
4052*4882a593Smuzhiyun goto out;
4053*4882a593Smuzhiyun }
4054*4882a593Smuzhiyun }
4055*4882a593Smuzhiyun out:
4056*4882a593Smuzhiyun spin_unlock(&fs_info->qgroup_lock);
4057*4882a593Smuzhiyun }
4058*4882a593Smuzhiyun
4059*4882a593Smuzhiyun void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4060*4882a593Smuzhiyun {
4061*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
4062*4882a593Smuzhiyun
4063*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
4064*4882a593Smuzhiyun !is_fstree(root->root_key.objectid))
4065*4882a593Smuzhiyun return;
4066*4882a593Smuzhiyun /* Same as btrfs_qgroup_free_meta_prealloc() */
4067*4882a593Smuzhiyun num_bytes = sub_root_meta_rsv(root, num_bytes,
4068*4882a593Smuzhiyun BTRFS_QGROUP_RSV_META_PREALLOC);
4069*4882a593Smuzhiyun trace_qgroup_meta_convert(root, num_bytes);
4070*4882a593Smuzhiyun qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
4071*4882a593Smuzhiyun }
4072*4882a593Smuzhiyun
4073*4882a593Smuzhiyun /*
4074*4882a593Smuzhiyun * Check for leaked qgroup reserved space, normally at inode destruction
4075*4882a593Smuzhiyun * time
4076*4882a593Smuzhiyun */
4077*4882a593Smuzhiyun void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4078*4882a593Smuzhiyun {
4079*4882a593Smuzhiyun struct extent_changeset changeset;
4080*4882a593Smuzhiyun struct ulist_node *unode;
4081*4882a593Smuzhiyun struct ulist_iterator iter;
4082*4882a593Smuzhiyun int ret;
4083*4882a593Smuzhiyun
4084*4882a593Smuzhiyun extent_changeset_init(&changeset);
4085*4882a593Smuzhiyun ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4086*4882a593Smuzhiyun EXTENT_QGROUP_RESERVED, &changeset);
4087*4882a593Smuzhiyun
4088*4882a593Smuzhiyun WARN_ON(ret < 0);
4089*4882a593Smuzhiyun if (WARN_ON(changeset.bytes_changed)) {
4090*4882a593Smuzhiyun ULIST_ITER_INIT(&iter);
4091*4882a593Smuzhiyun while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4092*4882a593Smuzhiyun btrfs_warn(inode->root->fs_info,
4093*4882a593Smuzhiyun "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4094*4882a593Smuzhiyun btrfs_ino(inode), unode->val, unode->aux);
4095*4882a593Smuzhiyun }
4096*4882a593Smuzhiyun btrfs_qgroup_free_refroot(inode->root->fs_info,
4097*4882a593Smuzhiyun inode->root->root_key.objectid,
4098*4882a593Smuzhiyun changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4099*4882a593Smuzhiyun
4100*4882a593Smuzhiyun }
4101*4882a593Smuzhiyun extent_changeset_release(&changeset);
4102*4882a593Smuzhiyun }
4103*4882a593Smuzhiyun
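/*
 * Initialize the per-root structure tracking subtree roots swapped by
 * relocation: reset the per-level rbtrees and mark the root as having no
 * swapped blocks yet.
 */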
4104*4882a593Smuzhiyun void btrfs_qgroup_init_swapped_blocks(
4105*4882a593Smuzhiyun struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4106*4882a593Smuzhiyun {
4107*4882a593Smuzhiyun int i;
4108*4882a593Smuzhiyun
4109*4882a593Smuzhiyun spin_lock_init(&swapped_blocks->lock);
4110*4882a593Smuzhiyun for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4111*4882a593Smuzhiyun swapped_blocks->blocks[i] = RB_ROOT;
4112*4882a593Smuzhiyun swapped_blocks->swapped = false;
4113*4882a593Smuzhiyun }
4114*4882a593Smuzhiyun
4115*4882a593Smuzhiyun /*
4116*4882a593Smuzhiyun * Delete all swapped block records of @root.
4117*4882a593Smuzhiyun * Every record here means we skipped a full subtree scan for qgroup.
4118*4882a593Smuzhiyun *
4119*4882a593Smuzhiyun * Called when committing a transaction.
4120*4882a593Smuzhiyun */
4121*4882a593Smuzhiyun void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4122*4882a593Smuzhiyun {
4123*4882a593Smuzhiyun struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4124*4882a593Smuzhiyun int i;
4125*4882a593Smuzhiyun
4126*4882a593Smuzhiyun swapped_blocks = &root->swapped_blocks;
4127*4882a593Smuzhiyun
4128*4882a593Smuzhiyun spin_lock(&swapped_blocks->lock);
4129*4882a593Smuzhiyun if (!swapped_blocks->swapped)
4130*4882a593Smuzhiyun goto out;
4131*4882a593Smuzhiyun for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4132*4882a593Smuzhiyun struct rb_root *cur_root = &swapped_blocks->blocks[i];
4133*4882a593Smuzhiyun struct btrfs_qgroup_swapped_block *entry;
4134*4882a593Smuzhiyun struct btrfs_qgroup_swapped_block *next;
4135*4882a593Smuzhiyun
4136*4882a593Smuzhiyun rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4137*4882a593Smuzhiyun node)
4138*4882a593Smuzhiyun kfree(entry);
4139*4882a593Smuzhiyun swapped_blocks->blocks[i] = RB_ROOT;
4140*4882a593Smuzhiyun }
4141*4882a593Smuzhiyun swapped_blocks->swapped = false;
4142*4882a593Smuzhiyun out:
4143*4882a593Smuzhiyun spin_unlock(&swapped_blocks->lock);
4144*4882a593Smuzhiyun }
4145*4882a593Smuzhiyun
4146*4882a593Smuzhiyun /*
4147*4882a593Smuzhiyun * Add a subtree root record into @subvol_root.
4148*4882a593Smuzhiyun *
4149*4882a593Smuzhiyun * @subvol_root: tree root of the subvolume tree that gets swapped
4150*4882a593Smuzhiyun * @bg: block group under balance
4151*4882a593Smuzhiyun * @subvol_parent/slot: pointer to the subtree root in the subvolume tree
4152*4882a593Smuzhiyun * @reloc_parent/slot: pointer to the subtree root in the reloc tree
4153*4882a593Smuzhiyun * BOTH POINTERS ARE BEFORE TREE SWAP
4154*4882a593Smuzhiyun * @last_snapshot: last snapshot generation of the subvolume tree
4155*4882a593Smuzhiyun */
4156*4882a593Smuzhiyun int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4157*4882a593Smuzhiyun struct btrfs_root *subvol_root,
4158*4882a593Smuzhiyun struct btrfs_block_group *bg,
4159*4882a593Smuzhiyun struct extent_buffer *subvol_parent, int subvol_slot,
4160*4882a593Smuzhiyun struct extent_buffer *reloc_parent, int reloc_slot,
4161*4882a593Smuzhiyun u64 last_snapshot)
4162*4882a593Smuzhiyun {
4163*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4164*4882a593Smuzhiyun struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4165*4882a593Smuzhiyun struct btrfs_qgroup_swapped_block *block;
4166*4882a593Smuzhiyun struct rb_node **cur;
4167*4882a593Smuzhiyun struct rb_node *parent = NULL;
4168*4882a593Smuzhiyun int level = btrfs_header_level(subvol_parent) - 1;
4169*4882a593Smuzhiyun int ret = 0;
4170*4882a593Smuzhiyun
4171*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4172*4882a593Smuzhiyun return 0;
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4175*4882a593Smuzhiyun btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4176*4882a593Smuzhiyun btrfs_err_rl(fs_info,
4177*4882a593Smuzhiyun "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4178*4882a593Smuzhiyun __func__,
4179*4882a593Smuzhiyun btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4180*4882a593Smuzhiyun btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4181*4882a593Smuzhiyun return -EUCLEAN;
4182*4882a593Smuzhiyun }
4183*4882a593Smuzhiyun
4184*4882a593Smuzhiyun block = kmalloc(sizeof(*block), GFP_NOFS);
4185*4882a593Smuzhiyun if (!block) {
4186*4882a593Smuzhiyun ret = -ENOMEM;
4187*4882a593Smuzhiyun goto out;
4188*4882a593Smuzhiyun }
4189*4882a593Smuzhiyun
4190*4882a593Smuzhiyun /*
4191*4882a593Smuzhiyun * @reloc_parent/slot is still before swap, while @block is going to
4192*4882a593Smuzhiyun * record the bytenr after swap, so we do the swap here.
4193*4882a593Smuzhiyun */
4194*4882a593Smuzhiyun block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4195*4882a593Smuzhiyun block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4196*4882a593Smuzhiyun reloc_slot);
4197*4882a593Smuzhiyun block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4198*4882a593Smuzhiyun block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4199*4882a593Smuzhiyun subvol_slot);
4200*4882a593Smuzhiyun block->last_snapshot = last_snapshot;
4201*4882a593Smuzhiyun block->level = level;
4202*4882a593Smuzhiyun
4203*4882a593Smuzhiyun /*
4204*4882a593Smuzhiyun * If we have bg == NULL, we're called from btrfs_recover_relocation();
4205*4882a593Smuzhiyun * no one else can modify tree blocks, thus the qgroup numbers will not
4206*4882a593Smuzhiyun * change no matter the value of trace_leaf.
4207*4882a593Smuzhiyun */
4208*4882a593Smuzhiyun if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4209*4882a593Smuzhiyun block->trace_leaf = true;
4210*4882a593Smuzhiyun else
4211*4882a593Smuzhiyun block->trace_leaf = false;
4212*4882a593Smuzhiyun btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4213*4882a593Smuzhiyun
4214*4882a593Smuzhiyun /* Insert @block into @blocks */
4215*4882a593Smuzhiyun spin_lock(&blocks->lock);
4216*4882a593Smuzhiyun cur = &blocks->blocks[level].rb_node;
4217*4882a593Smuzhiyun while (*cur) {
4218*4882a593Smuzhiyun struct btrfs_qgroup_swapped_block *entry;
4219*4882a593Smuzhiyun
4220*4882a593Smuzhiyun parent = *cur;
4221*4882a593Smuzhiyun entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4222*4882a593Smuzhiyun node);
4223*4882a593Smuzhiyun
4224*4882a593Smuzhiyun if (entry->subvol_bytenr < block->subvol_bytenr) {
4225*4882a593Smuzhiyun cur = &(*cur)->rb_left;
4226*4882a593Smuzhiyun } else if (entry->subvol_bytenr > block->subvol_bytenr) {
4227*4882a593Smuzhiyun cur = &(*cur)->rb_right;
4228*4882a593Smuzhiyun } else {
4229*4882a593Smuzhiyun if (entry->subvol_generation !=
4230*4882a593Smuzhiyun block->subvol_generation ||
4231*4882a593Smuzhiyun entry->reloc_bytenr != block->reloc_bytenr ||
4232*4882a593Smuzhiyun entry->reloc_generation !=
4233*4882a593Smuzhiyun block->reloc_generation) {
4234*4882a593Smuzhiyun /*
4235*4882a593Smuzhiyun * A duplicate entry with mismatched data was found.
4236*4882a593Smuzhiyun * This shouldn't happen.
4237*4882a593Smuzhiyun *
4238*4882a593Smuzhiyun * Marking qgroup inconsistent should be enough
4239*4882a593Smuzhiyun * for end users.
4240*4882a593Smuzhiyun */
4241*4882a593Smuzhiyun WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4242*4882a593Smuzhiyun ret = -EEXIST;
4243*4882a593Smuzhiyun }
4244*4882a593Smuzhiyun kfree(block);
4245*4882a593Smuzhiyun goto out_unlock;
4246*4882a593Smuzhiyun }
4247*4882a593Smuzhiyun }
4248*4882a593Smuzhiyun rb_link_node(&block->node, parent, cur);
4249*4882a593Smuzhiyun rb_insert_color(&block->node, &blocks->blocks[level]);
4250*4882a593Smuzhiyun blocks->swapped = true;
4251*4882a593Smuzhiyun out_unlock:
4252*4882a593Smuzhiyun spin_unlock(&blocks->lock);
4253*4882a593Smuzhiyun out:
4254*4882a593Smuzhiyun if (ret < 0)
4255*4882a593Smuzhiyun fs_info->qgroup_flags |=
4256*4882a593Smuzhiyun BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4257*4882a593Smuzhiyun return ret;
4258*4882a593Smuzhiyun }
4259*4882a593Smuzhiyun
4260*4882a593Smuzhiyun /*
4261*4882a593Smuzhiyun * Check if the tree block is a subtree root, and if so do the needed
4262*4882a593Smuzhiyun * delayed subtree trace for qgroup.
4263*4882a593Smuzhiyun *
4264*4882a593Smuzhiyun * This is called during btrfs_cow_block().
4265*4882a593Smuzhiyun */
4266*4882a593Smuzhiyun int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4267*4882a593Smuzhiyun struct btrfs_root *root,
4268*4882a593Smuzhiyun struct extent_buffer *subvol_eb)
4269*4882a593Smuzhiyun {
4270*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
4271*4882a593Smuzhiyun struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4272*4882a593Smuzhiyun struct btrfs_qgroup_swapped_block *block;
4273*4882a593Smuzhiyun struct extent_buffer *reloc_eb = NULL;
4274*4882a593Smuzhiyun struct rb_node *node;
4275*4882a593Smuzhiyun bool found = false;
4276*4882a593Smuzhiyun bool swapped = false;
4277*4882a593Smuzhiyun int level = btrfs_header_level(subvol_eb);
4278*4882a593Smuzhiyun int ret = 0;
4279*4882a593Smuzhiyun int i;
4280*4882a593Smuzhiyun
4281*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4282*4882a593Smuzhiyun return 0;
4283*4882a593Smuzhiyun if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
4284*4882a593Smuzhiyun return 0;
4285*4882a593Smuzhiyun
4286*4882a593Smuzhiyun spin_lock(&blocks->lock);
4287*4882a593Smuzhiyun if (!blocks->swapped) {
4288*4882a593Smuzhiyun spin_unlock(&blocks->lock);
4289*4882a593Smuzhiyun return 0;
4290*4882a593Smuzhiyun }
4291*4882a593Smuzhiyun node = blocks->blocks[level].rb_node;
4292*4882a593Smuzhiyun
4293*4882a593Smuzhiyun while (node) {
4294*4882a593Smuzhiyun block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4295*4882a593Smuzhiyun if (block->subvol_bytenr < subvol_eb->start) {
4296*4882a593Smuzhiyun node = node->rb_left;
4297*4882a593Smuzhiyun } else if (block->subvol_bytenr > subvol_eb->start) {
4298*4882a593Smuzhiyun node = node->rb_right;
4299*4882a593Smuzhiyun } else {
4300*4882a593Smuzhiyun found = true;
4301*4882a593Smuzhiyun break;
4302*4882a593Smuzhiyun }
4303*4882a593Smuzhiyun }
4304*4882a593Smuzhiyun if (!found) {
4305*4882a593Smuzhiyun spin_unlock(&blocks->lock);
4306*4882a593Smuzhiyun goto out;
4307*4882a593Smuzhiyun }
4308*4882a593Smuzhiyun /* Found one, remove it from @blocks first and update blocks->swapped */
4309*4882a593Smuzhiyun rb_erase(&block->node, &blocks->blocks[level]);
4310*4882a593Smuzhiyun for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4311*4882a593Smuzhiyun if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4312*4882a593Smuzhiyun swapped = true;
4313*4882a593Smuzhiyun break;
4314*4882a593Smuzhiyun }
4315*4882a593Smuzhiyun }
4316*4882a593Smuzhiyun blocks->swapped = swapped;
4317*4882a593Smuzhiyun spin_unlock(&blocks->lock);
4318*4882a593Smuzhiyun
4319*4882a593Smuzhiyun /* Read out reloc subtree root */
4320*4882a593Smuzhiyun reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
4321*4882a593Smuzhiyun block->reloc_generation, block->level,
4322*4882a593Smuzhiyun &block->first_key);
4323*4882a593Smuzhiyun if (IS_ERR(reloc_eb)) {
4324*4882a593Smuzhiyun ret = PTR_ERR(reloc_eb);
4325*4882a593Smuzhiyun reloc_eb = NULL;
4326*4882a593Smuzhiyun goto free_out;
4327*4882a593Smuzhiyun }
4328*4882a593Smuzhiyun if (!extent_buffer_uptodate(reloc_eb)) {
4329*4882a593Smuzhiyun ret = -EIO;
4330*4882a593Smuzhiyun goto free_out;
4331*4882a593Smuzhiyun }
4332*4882a593Smuzhiyun
4333*4882a593Smuzhiyun ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4334*4882a593Smuzhiyun block->last_snapshot, block->trace_leaf);
4335*4882a593Smuzhiyun free_out:
4336*4882a593Smuzhiyun kfree(block);
4337*4882a593Smuzhiyun free_extent_buffer(reloc_eb);
4338*4882a593Smuzhiyun out:
4339*4882a593Smuzhiyun if (ret < 0) {
4340*4882a593Smuzhiyun btrfs_err_rl(fs_info,
4341*4882a593Smuzhiyun "failed to account subtree at bytenr %llu: %d",
4342*4882a593Smuzhiyun subvol_eb->start, ret);
4343*4882a593Smuzhiyun fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4344*4882a593Smuzhiyun }
4345*4882a593Smuzhiyun return ret;
4346*4882a593Smuzhiyun }
4347*4882a593Smuzhiyun
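/*
 * Free every qgroup extent record still queued on @trans, including their
 * old_roots ulists, without doing any accounting. Used on transaction cleanup
 * paths where the records will never be processed.
 */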
4348*4882a593Smuzhiyun void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4349*4882a593Smuzhiyun {
4350*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *entry;
4351*4882a593Smuzhiyun struct btrfs_qgroup_extent_record *next;
4352*4882a593Smuzhiyun struct rb_root *root;
4353*4882a593Smuzhiyun
4354*4882a593Smuzhiyun root = &trans->delayed_refs.dirty_extent_root;
4355*4882a593Smuzhiyun rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4356*4882a593Smuzhiyun ulist_free(entry->old_roots);
4357*4882a593Smuzhiyun kfree(entry);
4358*4882a593Smuzhiyun }
4359*4882a593Smuzhiyun }