// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_sb.h"
#include "xfs_ag_resv.h"

/*
 * Per-AG Block Reservations
 *
 * For some kinds of allocation group metadata structures, it is advantageous
 * to reserve a small number of blocks in each AG so that future expansions of
 * that data structure do not encounter ENOSPC, because errors during a btree
 * split cause the filesystem to go offline.
 *
 * Prior to the introduction of reflink, this wasn't an issue because the free
 * space btrees maintain a reserve of space (the AGFL) to handle any expansion
 * that may be necessary; and allocations of other metadata (inodes, BMBT,
 * dir/attr) aren't restricted to a single AG. However, with reflink it is
 * possible to allocate all the space in an AG, have subsequent reflink/CoW
 * activity expand the refcount btree, and discover that there's no space left
 * to handle that expansion. Since we can calculate the maximum size of the
 * refcount btree, we can reserve space for it and avoid ENOSPC.
 *
 * Handling per-AG reservations consists of four changes to the allocator's
 * behavior: First, because these reservations are always needed, we decrease
 * the ag_max_usable counter to reflect the size of the AG after the reserved
 * blocks are taken. Second, the reservations must be reflected in the
 * fdblocks count to maintain proper accounting. Third, each AG must maintain
 * its own reserved block counter so that we can calculate the amount of space
 * that must remain free to maintain the reservations. Fourth, the "remaining
 * reserved blocks" count must be used when calculating the length of the
 * longest free extent in an AG and to clamp maxlen in the per-AG allocation
 * functions. In other words, we maintain a virtual allocation via in-core
 * accounting tricks so that we don't have to clean up after a crash. :)
 *
 * Reserved blocks can be managed by passing one of the enum xfs_ag_resv_type
 * values via struct xfs_alloc_arg or directly to the xfs_free_extent
 * function. It might seem a little funny to maintain a reservoir of blocks
 * to feed another reservoir, but the AGFL only holds enough blocks to get
 * through the next transaction. The per-AG reservation is to ensure (we
 * hope) that each AG never runs out of blocks. Each data structure wanting
 * to use the reservation system should update ask/used in xfs_ag_resv_init.
 */
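
/*
 * Illustrative sketch (not part of this file's logic): a hypothetical caller
 * that wants its blocks drawn from, and returned to, the metadata reservation
 * tags the allocation request with the reservation type, roughly like so:
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp	= tp,
 *		.mp	= mp,
 *		.resv	= XFS_AG_RESV_METADATA,
 *		...
 *	};
 *
 * and a matching free passes the same type to xfs_free_extent() so the blocks
 * go back into the reservation rather than the general free pool. Treat this
 * as a usage sketch; the exact set of xfs_alloc_arg fields a caller must fill
 * in depends on the allocation being made.
 */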

/*
 * Are we critically low on blocks? For now we'll define that as the number
 * of blocks we can get our hands on being less than 10% of what we reserved
 * or less than some arbitrary number (maximum btree height).
 */
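/*
 * Note that the 10% is measured against the original ask (ar_asked), not
 * against the current free space. For example (hypothetical numbers), with
 * an ask of 200 blocks the reservation becomes critical once fewer than 20
 * reservable blocks remain, or once fewer than XFS_BTREE_MAXLEVELS blocks
 * remain regardless of the size of the ask.
 */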
bool
xfs_ag_resv_critical(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	xfs_extlen_t		avail;
	xfs_extlen_t		orig;

	switch (type) {
	case XFS_AG_RESV_METADATA:
		avail = pag->pagf_freeblks - pag->pag_rmapbt_resv.ar_reserved;
		orig = pag->pag_meta_resv.ar_asked;
		break;
	case XFS_AG_RESV_RMAPBT:
		avail = pag->pagf_freeblks + pag->pagf_flcount -
			pag->pag_meta_resv.ar_reserved;
		orig = pag->pag_rmapbt_resv.ar_asked;
		break;
	default:
		ASSERT(0);
		return false;
	}

	trace_xfs_ag_resv_critical(pag, type, avail);

	/* Critically low if less than 10% or max btree height remains. */
	return XFS_TEST_ERROR(avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS,
			pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL);
}

/*
 * How many blocks are reserved but not used, and therefore must not be
 * allocated away? The caller's own reservation is excluded from the count,
 * since the caller is entitled to consume blocks from its own reserve.
 */
xfs_extlen_t
xfs_ag_resv_needed(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	xfs_extlen_t		len;

	len = pag->pag_meta_resv.ar_reserved + pag->pag_rmapbt_resv.ar_reserved;
	switch (type) {
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		len -= xfs_perag_resv(pag, type)->ar_reserved;
		break;
	case XFS_AG_RESV_NONE:
		/* empty */
		break;
	default:
		ASSERT(0);
	}

	trace_xfs_ag_resv_needed(pag, type, len);

	return len;
}

/* Clean out a reservation */
static int
__xfs_ag_resv_free(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_ag_resv	*resv;
	xfs_extlen_t		oldresv;
	int			error;

	trace_xfs_ag_resv_free(pag, type, 0);

	resv = xfs_perag_resv(pag, type);
	if (pag->pag_agno == 0)
		pag->pag_mount->m_ag_max_usable += resv->ar_asked;
	/*
	 * RMAPBT blocks come from the AGFL and AGFL blocks are always
	 * considered "free", so whatever was reserved at mount time must be
	 * given back at umount.
	 */
	if (type == XFS_AG_RESV_RMAPBT)
		oldresv = resv->ar_orig_reserved;
	else
		oldresv = resv->ar_reserved;
	error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
	resv->ar_reserved = 0;
	resv->ar_asked = 0;
	resv->ar_orig_reserved = 0;

	if (error)
		trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
				error, _RET_IP_);
	return error;
}

/* Free a per-AG reservation. */
int
xfs_ag_resv_free(
	struct xfs_perag	*pag)
{
	int			error;
	int			err2;

	error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
	err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
	if (err2 && !error)
		error = err2;
	return error;
}

static int
__xfs_ag_resv_init(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	xfs_extlen_t		ask,
	xfs_extlen_t		used)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_ag_resv	*resv;
	int			error;
	xfs_extlen_t		hidden_space;

	if (used > ask)
		ask = used;

	switch (type) {
	case XFS_AG_RESV_RMAPBT:
		/*
		 * Space taken by the rmapbt is not subtracted from fdblocks
		 * because the rmapbt lives in the free space. Here we must
		 * subtract the entire reservation from fdblocks so that we
		 * always have blocks available for rmapbt expansion.
		 */
		hidden_space = ask;
		break;
	case XFS_AG_RESV_METADATA:
		/*
		 * Space taken by all other metadata btrees is accounted
		 * on-disk as used space. We therefore only hide the space
		 * that is reserved but not used by the trees.
		 */
		hidden_space = ask - used;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}
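	/*
	 * Worked example (hypothetical numbers): with ask = 100 and used = 20,
	 * a METADATA reservation hides 80 blocks from fdblocks (the 20 in-use
	 * blocks are already accounted on disk), whereas an RMAPBT reservation
	 * hides the full 100 because rmapbt blocks live in the free space.
	 */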
	error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
	if (error) {
		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
				error, _RET_IP_);
		xfs_warn(mp,
"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
				pag->pag_agno);
		return error;
	}

	/*
	 * Reduce the maximum per-AG allocation length by however much we're
	 * trying to reserve for an AG. Since this is a filesystem-wide
	 * counter, we only make the adjustment for AG 0. This assumes that
	 * there aren't any AGs hungrier for per-AG reservation than AG 0.
	 */
	if (pag->pag_agno == 0)
		mp->m_ag_max_usable -= ask;

	resv = xfs_perag_resv(pag, type);
	resv->ar_asked = ask;
	resv->ar_orig_reserved = hidden_space;
	resv->ar_reserved = ask - used;

	trace_xfs_ag_resv_init(pag, type, ask);
	return 0;
}

/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = pag->pag_mount;
	xfs_agnumber_t		agno = pag->pag_agno;
	xfs_extlen_t		ask;
	xfs_extlen_t		used;
	int			error = 0;

	/* Create the metadata reservation. */
	if (pag->pag_meta_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
		if (error)
			goto out;

		error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
				ask, used);
		if (error) {
			/*
			 * Because we didn't have per-AG reservations when the
			 * finobt feature was added, we might not be able to
			 * reserve all needed blocks. Warn and fall back to the
			 * old and potentially buggy code in that case, but
			 * ensure we do have the reservation for the refcountbt.
			 */
			ask = used = 0;

			mp->m_finobt_nores = true;

			error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
					&used);
			if (error)
				goto out;

			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
					ask, used);
			if (error)
				goto out;
		}
	}

	/* Create the RMAPBT metadata reservation. */
	if (pag->pag_rmapbt_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_RMAPBT, ask, used);
		if (error)
			goto out;
	}

#ifdef DEBUG
	/* need to read in the AGF for the ASSERT below to work */
	error = xfs_alloc_pagf_init(pag->pag_mount, tp, pag->pag_agno, 0);
	if (error)
		return error;

	ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
	       xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <=
	       pag->pagf_freeblks + pag->pagf_flcount);
#endif
out:
	return error;
}
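
/*
 * Usage sketch (simplified; the real callers live outside this file): at
 * mount time the filesystem walks every AG and establishes its reservations,
 * roughly along these lines:
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		pag = xfs_perag_get(mp, agno);
 *		error = xfs_ag_resv_init(pag, NULL);
 *		xfs_perag_put(pag);
 *		if (error)
 *			break;
 *	}
 *
 * A matching walk that calls xfs_ag_resv_free() on each perag tears the
 * reservations back down, e.g. at unmount. Treat this as an illustration of
 * how the functions in this file are driven, not as the callers' exact code.
 */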

/* Allocate a block from the reservation. */
void
xfs_ag_resv_alloc_extent(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	struct xfs_alloc_arg	*args)
{
	struct xfs_ag_resv	*resv;
	xfs_extlen_t		len;
	uint			field;

	trace_xfs_ag_resv_alloc_extent(pag, type, args->len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		/* fall through */
	case XFS_AG_RESV_NONE:
		field = args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
				       XFS_TRANS_SB_FDBLOCKS;
		xfs_trans_mod_sb(args->tp, field, -(int64_t)args->len);
		return;
	}

	len = min_t(xfs_extlen_t, args->len, resv->ar_reserved);
	resv->ar_reserved -= len;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Allocations of reserved blocks only need on-disk sb updates... */
	xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS, -(int64_t)len);
	/* ...but non-reserved blocks need in-core and on-disk updates. */
	if (args->len > len)
		xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_FDBLOCKS,
				-((int64_t)args->len - len));
}

/* Free a block to the reservation. */
void
xfs_ag_resv_free_extent(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	xfs_extlen_t		leftover;
	struct xfs_ag_resv	*resv;

	trace_xfs_ag_resv_free_extent(pag, type, len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		/* fall through */
	case XFS_AG_RESV_NONE:
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
		return;
	}

	leftover = min_t(xfs_extlen_t, len, resv->ar_asked - resv->ar_reserved);
	resv->ar_reserved += leftover;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Freeing into the reserved pool only requires on-disk update... */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, len);
	/* ...but freeing beyond that requires in-core and on-disk update. */
	if (len > leftover)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len - leftover);
}