/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include "ulist.h"
#include "delayed-ref.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup splits into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but doing so consumes extra CPU time, so it should be avoided if
 *    possible.
 *
 * 3) Account
 *    Btrfs qgroup updates its numbers based on the dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */
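
/*
 * Illustrative sketch (not taken from the kernel sources) of how the three
 * parts map onto the API declared later in this header. Error handling is
 * omitted and the surrounding context (a data write path with valid @inode
 * and @trans) is assumed:
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	// 1) Reserve: charge the range against the qgroup limit
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// 2) Trace: mark the allocated extent dirty for qgroup
 *	ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS);
 *
 *	// 3) Account: done for all traced extents at commit time
 *	ret = btrfs_qgroup_account_extents(trans);
 */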

/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap the subtrees of the subvolume and reloc trees.
 * In theory, we need to trace all subtree blocks of both the subvolume and
 * reloc trees, since their owner has changed during such a swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structures, such a swap won't cause a
 * qgroup number change.
 *
 * But there is a race window between the subtree swap and transaction commit;
 * during that window, if we increase/decrease the tree level or merge/split
 * tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root block that gets swapped.
 *
 *    During the subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA      OB
 *        /  |     |  \                      /  |      |  \
 *      NC  ND     OE  OF                   OC  OD     OE  OF
 *
 *   In this case, NA and OA are going to be swapped, so record (NA, OA) into
 *   subvolume tree X.
 *
 * 2) After the subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA      OB
 *        /  |     |  \                      /  |      |  \
 *      OC  OD     OE  OF                   NC  ND     OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't match any record, nothing happens.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA, resulting in
 *     6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, the qgroup numbers will
 *     still be correct.
 *     Then NA's record gets removed from X's swapped_blocks.
 *
 * 4)  Transaction commit
 *     Any record left in X's swapped_blocks gets removed, since there was no
 *     modification to the swapped subtrees and thus no need to trigger a
 *     heavy qgroup subtree rescan for them.
 */
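
/*
 * Illustrative sketch (not taken from the kernel sources) of how the above
 * workflow maps onto the swapped-blocks helpers declared at the bottom of
 * this header. The call sites shown (relocation setup, subtree swap, COW,
 * commit) are assumptions about the surrounding context:
 *
 *	// Once, when the root is set up
 *	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 *
 *	// Step 1: at subtree swap time, record the (NA, OA) pair
 *	ret = btrfs_qgroup_add_swapped_blocks(trans, subvol_root, bg,
 *			subvol_parent, subvol_slot,
 *			reloc_parent, reloc_slot, last_snapshot);
 *
 *	// Step 3: when a tree block gets COWed, check for a hit and
 *	// trace both recorded subtrees if one is found
 *	ret = btrfs_qgroup_trace_subtree_after_cow(trans, root, eb);
 *
 *	// Step 4: at transaction commit, drop any leftover records
 *	btrfs_qgroup_clean_swapped_blocks(root);
 */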

/*
 * Record a dirty extent, and inform qgroup to update the quota on it.
 * TODO: Use a kmem cache to allocate it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;

	/*
	 * For freeing qgroup reserved data space.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * They will be used to free reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needing to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};

struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Since qgroup data is only updated at transaction commit time,
 *	reserved space for metadata must be kept until the transaction
 *	commits.
 *	Any metadata reservation that is used in btrfs_start_transaction()
 *	should be of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, followed by btrfs_join_transaction() to get a trans
 *	handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS.
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};

/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type should have different reservation behavior.
 * E.g. data follows its io_tree flag modification, while
 * *currently* meta is just reserve-and-clear during a transaction.
 *
 * TODO: Add a new type for reservations which can survive a transaction
 * commit. The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is a member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * sysfs kobject
	 */
	struct kobject kobj;
};

static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}
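
/*
 * For example, a qgroupid is "level/id" with the level stored in the bits
 * at and above BTRFS_QGROUP_LEVEL_SHIFT; for the level-0 qgroup "0/257"
 * the helper above simply returns 257, while for "1/100" it strips the
 * level and returns 100. (Values are illustrative.)
 */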

/*
 * For qgroup event trace points only
 */
#define QGROUP_RESERVE		(1<<0)
#define QGROUP_RELEASE		(1<<1)
#define QGROUP_FREE		(1<<2)

int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;

/*
 * Inform qgroup to trace one dirty extent, whose info is recorded in
 * @record, so qgroup can account it at transaction commit time.
 *
 * No-lock version; the caller must hold the delayed ref lock and have
 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
 * exiting the lock context.
 *
 * Return 0 for a successful insertion.
 * Return >0 for an existing record; the caller can free @record safely.
 * Error is not possible.
 */
int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);

/*
 * Post handler after qgroup_trace_extent_nolock().
 *
 * NOTE: Current qgroup does the expensive backref walk at transaction
 * commit time with TRANS_STATE_COMMIT_DOING, which blocks incoming
 * new transactions.
 * This is designed to allow btrfs_find_all_roots() to get a correct
 * new_roots result.
 *
 * However, for old_roots there is no need to do the backref walk at that
 * time, since we search commit roots to walk backrefs and the result will
 * always be correct.
 *
 * Due to the nature of the no-lock version, we can't do the backref walk
 * there.
 * So we must call btrfs_qgroup_trace_extent_post() after exiting the
 * spinlock context.
 *
 * TODO: If we can fix and prove that btrfs_find_all_roots() can get a
 * correct result using the current root, then we can move all the expensive
 * backref walking out of transaction commit, but not now, as qgroup
 * accounting would be wrong again.
 */
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord);
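
/*
 * Illustrative sketch (not taken from the kernel sources) of the expected
 * two-phase calling pattern; the record fields shown are assumptions about
 * the caller's context:
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	// ... fill in record->bytenr, record->num_bytes, etc. ...
 *
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *
 *	if (ret > 0)
 *		kfree(record);		// already traced, record unused
 *	else if (ret == 0)
 *		// outside the spinlock: do the old_roots backref walk
 *		ret = btrfs_qgroup_trace_extent_post(fs_info, record);
 */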

/*
 * Inform qgroup to trace one dirty extent, specified by @bytenr and
 * @num_bytes, so qgroup can account it at transaction commit time.
 *
 * This is the better-encapsulated version, with memory allocation and the
 * backref walk for commit roots, so it can sleep.
 *
 * Return 0 if the operation is done.
 * Return <0 for an error, like a memory allocation failure or an invalid
 * parameter (NULL trans).
 */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag);
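
/*
 * A minimal usage sketch (illustrative; @trans, @bytenr and @num_bytes come
 * from the caller's context):
 *
 *	ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;	// e.g. -ENOMEM; extent won't be accounted
 */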

/*
 * Inform qgroup to trace all leaf items of data.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM).
 */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
/*
 * Inform qgroup to trace a whole subtree, including all its child tree
 * blocks and data.
 * The root tree block is specified by @root_eb.
 *
 * Normally used by relocation (tree block swap) and subvolume deletion.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM or tree search error).
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len);
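
/*
 * Illustrative sketch (not taken from the kernel sources) of the data
 * reservation lifecycle; the success/failure split is an assumption about
 * a typical buffered write path:
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;	// e.g. -EDQUOT when over the qgroup limit
 *
 *	if (write_succeeded)
 *		// reservation is converted to used space at writeback
 *		ret = btrfs_qgroup_release_data(inode, start, len);
 *	else
 *		// error path: return the reserved space to the qgroup
 *		ret = btrfs_qgroup_free_data(inode, reserved, start, len);
 *
 *	extent_changeset_free(reserved);
 */
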
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce);
/* Reserve metadata space for pertrans and prealloc types */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
				int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PERTRANS, enforce);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
				int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PREALLOC, enforce);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed as needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PREALLOC);
}

/*
 * Per-transaction meta reservations should all be freed at transaction
 * commit time.
 */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);

/*
 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
 *
 * This is called when a preallocated meta reservation needs to be used,
 * normally after a btrfs_join_transaction() call.
 */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
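
/*
 * Illustrative sketch (not taken from the kernel sources) of the
 * META_PREALLOC -> META_PERTRANS flow described above; the surrounding
 * context (@root, @nr_bytes) is assumed:
 *
 *	// Before a transaction handle exists, reserve as PREALLOC
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nr_bytes, true);
 *	if (ret < 0)
 *		return ret;
 *
 *	trans = btrfs_join_transaction(root);
 *	if (IS_ERR(trans)) {
 *		// error path: return the unused PREALLOC reservation
 *		btrfs_qgroup_free_meta_prealloc(root, nr_bytes);
 *		return PTR_ERR(trans);
 *	}
 *
 *	// The reservation is now tied to this transaction
 *	btrfs_qgroup_convert_reserved_meta(root, nr_bytes);
 */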

void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);

#endif
434