// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers run only during mount/umount or
 * quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

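/*
 * Walk every dquot of the given type in the radix tree and call @execute on
 * each one, restarting after a short delay if any callback returns -EAGAIN.
 */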
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(mp, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked. This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

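/*
 * Return true if this inode still needs dquots attached: quotas must be
 * running and enabled, the dquots must not already be attached, and quota
 * inodes themselves never get dquots.
 */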
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

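/*
 * Attach dquots to an inode, holding the inode's ILOCK exclusively for the
 * duration of the attach.
 */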
int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

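/*
 * LRU isolation callback for the dquot shrinker: decide whether a dquot on
 * the LRU can be reclaimed, writing it back first if it is dirty.
 */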
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime, so remove it
	 * from the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}

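/*
 * Shrinker scan callback: walk the dquot LRU, submit any dirty buffers that
 * the isolation pass queued, then free every dquot isolated for disposal.
 */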
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

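/* Shrinker count callback: report how many dquots currently sit on the LRU. */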
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

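/*
 * Copy the default quota limits for one quota type from the id-zero dquot
 * into the in-memory defaults.
 */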
STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have been already set, let's just set the
	 * default limits for this quota type
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;
	defq->blk.warn = XFS_QM_BWARNLIMIT;
	defq->ino.warn = XFS_QM_IWARNLIMIT;
	defq->rtb.warn = XFS_QM_RTBWARNLIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before they can no longer write.  If it is zero, a default
	 * is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;
	if (dqp->q_blk.warnings)
		defq->blk.warn = dqp->q_blk.warnings;
	if (dqp->q_ino.warnings)
		defq->ino.warn = dqp->q_ino.warnings;
	if (dqp->q_rtb.warnings)
		defq->rtb.warn = dqp->q_rtb.warnings;

	xfs_qm_dqdestroy(dqp);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_RUNNING(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	bool		need_alloc = true;

	*ip = NULL;
	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ip);
	return error;
}

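/*
 * Sanitize every on-disk dquot in this buffer and zero out its counters and
 * timers so that quotacheck can rebuild the usage numbers from scratch.
 */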
STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(xfs_dqblk_t);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_sb_version_hasbigtime(&mp->m_sb))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}

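/*
 * Walk a contiguous run of dquot blocks, resetting the dquots in each buffer
 * and queueing the buffers for delayed write.
 */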
STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       &xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto error0;
		}

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

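/*
 * Flush one dirty dquot to its backing buffer during quotacheck, queueing the
 * buffer on the caller's delayed write list.
 */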
1216*4882a593Smuzhiyun STATIC int
xfs_qm_flush_one(struct xfs_dquot * dqp,void * data)1217*4882a593Smuzhiyun xfs_qm_flush_one(
1218*4882a593Smuzhiyun struct xfs_dquot *dqp,
1219*4882a593Smuzhiyun void *data)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun struct xfs_mount *mp = dqp->q_mount;
1222*4882a593Smuzhiyun struct list_head *buffer_list = data;
1223*4882a593Smuzhiyun struct xfs_buf *bp = NULL;
1224*4882a593Smuzhiyun int error = 0;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun xfs_dqlock(dqp);
1227*4882a593Smuzhiyun if (dqp->q_flags & XFS_DQFLAG_FREEING)
1228*4882a593Smuzhiyun goto out_unlock;
1229*4882a593Smuzhiyun if (!XFS_DQ_IS_DIRTY(dqp))
1230*4882a593Smuzhiyun goto out_unlock;
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun /*
1233*4882a593Smuzhiyun * The only way the dquot is already flush locked by the time quotacheck
1234*4882a593Smuzhiyun * gets here is if reclaim flushed it before the dqadjust walk dirtied
1235*4882a593Smuzhiyun * it for the final time. Quotacheck collects all dquot bufs in the
1236*4882a593Smuzhiyun * local delwri queue before dquots are dirtied, so reclaim can't have
1237*4882a593Smuzhiyun * possibly queued it for I/O. The only way out is to push the buffer to
1238*4882a593Smuzhiyun * cycle the flush lock.
1239*4882a593Smuzhiyun */
1240*4882a593Smuzhiyun if (!xfs_dqflock_nowait(dqp)) {
1241*4882a593Smuzhiyun /* buf is pinned in-core by delwri list */
1242*4882a593Smuzhiyun bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1243*4882a593Smuzhiyun mp->m_quotainfo->qi_dqchunklen, 0);
1244*4882a593Smuzhiyun if (!bp) {
1245*4882a593Smuzhiyun error = -EINVAL;
1246*4882a593Smuzhiyun goto out_unlock;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun xfs_buf_unlock(bp);
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun xfs_buf_delwri_pushbuf(bp, buffer_list);
1251*4882a593Smuzhiyun xfs_buf_rele(bp);
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun error = -EAGAIN;
1254*4882a593Smuzhiyun goto out_unlock;
1255*4882a593Smuzhiyun }
1256*4882a593Smuzhiyun
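	/* Flush the dirty dquot into its backing buffer and queue the buffer. */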
1257*4882a593Smuzhiyun error = xfs_qm_dqflush(dqp, &bp);
1258*4882a593Smuzhiyun if (error)
1259*4882a593Smuzhiyun goto out_unlock;
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun xfs_buf_delwri_queue(bp, buffer_list);
1262*4882a593Smuzhiyun xfs_buf_relse(bp);
1263*4882a593Smuzhiyun out_unlock:
1264*4882a593Smuzhiyun xfs_dqunlock(dqp);
1265*4882a593Smuzhiyun return error;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /*
1269*4882a593Smuzhiyun * Walk through all the filesystem inodes and construct a consistent view
1270*4882a593Smuzhiyun * of the disk quota world. If the quotacheck fails, disable quotas.
1271*4882a593Smuzhiyun */
1272*4882a593Smuzhiyun STATIC int
1273*4882a593Smuzhiyun xfs_qm_quotacheck(
1274*4882a593Smuzhiyun xfs_mount_t *mp)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun int error, error2;
1277*4882a593Smuzhiyun uint flags;
1278*4882a593Smuzhiyun LIST_HEAD (buffer_list);
1279*4882a593Smuzhiyun struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1280*4882a593Smuzhiyun struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1281*4882a593Smuzhiyun struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun flags = 0;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun ASSERT(uip || gip || pip);
1286*4882a593Smuzhiyun ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun xfs_notice(mp, "Quotacheck needed: Please wait.");
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun /*
1291*4882a593Smuzhiyun * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1292*4882a593Smuzhiyun * their counters to zero. We need a clean slate.
1293*4882a593Smuzhiyun * We don't log our changes till later.
1294*4882a593Smuzhiyun */
1295*4882a593Smuzhiyun if (uip) {
1296*4882a593Smuzhiyun error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1297*4882a593Smuzhiyun &buffer_list);
1298*4882a593Smuzhiyun if (error)
1299*4882a593Smuzhiyun goto error_return;
1300*4882a593Smuzhiyun flags |= XFS_UQUOTA_CHKD;
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun if (gip) {
1304*4882a593Smuzhiyun error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1305*4882a593Smuzhiyun &buffer_list);
1306*4882a593Smuzhiyun if (error)
1307*4882a593Smuzhiyun goto error_return;
1308*4882a593Smuzhiyun flags |= XFS_GQUOTA_CHKD;
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun if (pip) {
1312*4882a593Smuzhiyun error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1313*4882a593Smuzhiyun &buffer_list);
1314*4882a593Smuzhiyun if (error)
1315*4882a593Smuzhiyun goto error_return;
1316*4882a593Smuzhiyun flags |= XFS_PQUOTA_CHKD;
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun
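	/*
	 * Walk every inode in the filesystem and add its block and inode
	 * usage to the in-core dquots via xfs_qm_dqusage_adjust.
	 */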
1319*4882a593Smuzhiyun error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1320*4882a593Smuzhiyun NULL);
1321*4882a593Smuzhiyun if (error)
1322*4882a593Smuzhiyun goto error_return;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun /*
1325*4882a593Smuzhiyun * We've made all the changes that we need to make incore. Flush them
1326*4882a593Smuzhiyun * down to disk buffers if everything was updated successfully.
1327*4882a593Smuzhiyun */
1328*4882a593Smuzhiyun if (XFS_IS_UQUOTA_ON(mp)) {
1329*4882a593Smuzhiyun error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1330*4882a593Smuzhiyun &buffer_list);
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun if (XFS_IS_GQUOTA_ON(mp)) {
1333*4882a593Smuzhiyun error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1334*4882a593Smuzhiyun &buffer_list);
1335*4882a593Smuzhiyun if (!error)
1336*4882a593Smuzhiyun error = error2;
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun if (XFS_IS_PQUOTA_ON(mp)) {
1339*4882a593Smuzhiyun error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1340*4882a593Smuzhiyun &buffer_list);
1341*4882a593Smuzhiyun if (!error)
1342*4882a593Smuzhiyun error = error2;
1343*4882a593Smuzhiyun }
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun error2 = xfs_buf_delwri_submit(&buffer_list);
1346*4882a593Smuzhiyun if (!error)
1347*4882a593Smuzhiyun error = error2;
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun /*
1350*4882a593Smuzhiyun * We can get this error if we couldn't do a dquot allocation inside
1351*4882a593Smuzhiyun * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1352*4882a593Smuzhiyun * dirty dquots that might be cached, we just want to get rid of them
1353*4882a593Smuzhiyun * and turn quotaoff. The dquots won't be attached to any of the inodes
1354*4882a593Smuzhiyun * at this point (because we intentionally didn't in dqget_noattach).
1355*4882a593Smuzhiyun */
1356*4882a593Smuzhiyun if (error) {
1357*4882a593Smuzhiyun xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1358*4882a593Smuzhiyun goto error_return;
1359*4882a593Smuzhiyun }
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun /*
1362*4882a593Smuzhiyun * If one type of quotas is off, then it will lose its
1363*4882a593Smuzhiyun * quotachecked status, since we won't be doing accounting for
1364*4882a593Smuzhiyun * that type anymore.
1365*4882a593Smuzhiyun */
1366*4882a593Smuzhiyun mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1367*4882a593Smuzhiyun mp->m_qflags |= flags;
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun error_return:
1370*4882a593Smuzhiyun xfs_buf_delwri_cancel(&buffer_list);
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun if (error) {
1373*4882a593Smuzhiyun xfs_warn(mp,
1374*4882a593Smuzhiyun "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1375*4882a593Smuzhiyun error);
1376*4882a593Smuzhiyun /*
1377*4882a593Smuzhiyun * We must turn off quotas.
1378*4882a593Smuzhiyun */
1379*4882a593Smuzhiyun ASSERT(mp->m_quotainfo != NULL);
1380*4882a593Smuzhiyun xfs_qm_destroy_quotainfo(mp);
1381*4882a593Smuzhiyun if (xfs_mount_reset_sbqflags(mp)) {
1382*4882a593Smuzhiyun xfs_warn(mp,
1383*4882a593Smuzhiyun "Quotacheck: Failed to reset quota flags.");
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun } else
1386*4882a593Smuzhiyun xfs_notice(mp, "Quotacheck: Done.");
1387*4882a593Smuzhiyun return error;
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun /*
1391*4882a593Smuzhiyun * This is called from xfs_mountfs to start quotas and initialize all
1392*4882a593Smuzhiyun * necessary data structures like quotainfo. This is also responsible for
1393*4882a593Smuzhiyun * running a quotacheck as necessary. We are guaranteed that the superblock
1394*4882a593Smuzhiyun * is consistently read in at this point.
1395*4882a593Smuzhiyun *
1396*4882a593Smuzhiyun * If we fail here, the mount will continue with quota turned off. We don't
1397*4882a593Smuzhiyun * need to indicate success or failure at all.
1398*4882a593Smuzhiyun */
1399*4882a593Smuzhiyun void
1400*4882a593Smuzhiyun xfs_qm_mount_quotas(
1401*4882a593Smuzhiyun struct xfs_mount *mp)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun int error = 0;
1404*4882a593Smuzhiyun uint sbf;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun /*
1407*4882a593Smuzhiyun * Quotas on realtime volumes are not supported, so disable quotas
1408*4882a593Smuzhiyun * immediately if the filesystem has realtime extents.
1409*4882a593Smuzhiyun */
1410*4882a593Smuzhiyun if (mp->m_sb.sb_rextents) {
1411*4882a593Smuzhiyun xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1412*4882a593Smuzhiyun mp->m_qflags = 0;
1413*4882a593Smuzhiyun goto write_changes;
1414*4882a593Smuzhiyun }
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun /*
1419*4882a593Smuzhiyun * Allocate the quotainfo structure inside the mount struct, and
1420*4882a593Smuzhiyun * create quotainode(s), and change/rev superblock if necessary.
1421*4882a593Smuzhiyun */
1422*4882a593Smuzhiyun error = xfs_qm_init_quotainfo(mp);
1423*4882a593Smuzhiyun if (error) {
1424*4882a593Smuzhiyun /*
1425*4882a593Smuzhiyun * We must turn off quotas.
1426*4882a593Smuzhiyun */
1427*4882a593Smuzhiyun ASSERT(mp->m_quotainfo == NULL);
1428*4882a593Smuzhiyun mp->m_qflags = 0;
1429*4882a593Smuzhiyun goto write_changes;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun /*
1432*4882a593Smuzhiyun * If any of the quotas are not consistent, do a quotacheck.
1433*4882a593Smuzhiyun */
1434*4882a593Smuzhiyun if (XFS_QM_NEED_QUOTACHECK(mp)) {
1435*4882a593Smuzhiyun error = xfs_qm_quotacheck(mp);
1436*4882a593Smuzhiyun if (error) {
1437*4882a593Smuzhiyun /* Quotacheck failed and disabled quotas. */
1438*4882a593Smuzhiyun return;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun /*
1442*4882a593Smuzhiyun * If one type of quotas is off, then it will lose its
1443*4882a593Smuzhiyun * quotachecked status, since we won't be doing accounting for
1444*4882a593Smuzhiyun * that type anymore.
1445*4882a593Smuzhiyun */
1446*4882a593Smuzhiyun if (!XFS_IS_UQUOTA_ON(mp))
1447*4882a593Smuzhiyun mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1448*4882a593Smuzhiyun if (!XFS_IS_GQUOTA_ON(mp))
1449*4882a593Smuzhiyun mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1450*4882a593Smuzhiyun if (!XFS_IS_PQUOTA_ON(mp))
1451*4882a593Smuzhiyun mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun write_changes:
1454*4882a593Smuzhiyun /*
1455*4882a593Smuzhiyun * We actually don't have to acquire the m_sb_lock at all.
1456*4882a593Smuzhiyun * This can only be called from mount, and that's single threaded. XXX
1457*4882a593Smuzhiyun */
1458*4882a593Smuzhiyun spin_lock(&mp->m_sb_lock);
1459*4882a593Smuzhiyun sbf = mp->m_sb.sb_qflags;
1460*4882a593Smuzhiyun mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1461*4882a593Smuzhiyun spin_unlock(&mp->m_sb_lock);
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1464*4882a593Smuzhiyun if (xfs_sync_sb(mp, false)) {
1465*4882a593Smuzhiyun /*
1466*4882a593Smuzhiyun * We could only have been turning quotas off.
1467*4882a593Smuzhiyun * We aren't in very good shape actually because
1468*4882a593Smuzhiyun * the incore structures are convinced that quotas are
1469*4882a593Smuzhiyun * off, but the on-disk superblock doesn't know that!
1470*4882a593Smuzhiyun */
1471*4882a593Smuzhiyun ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1472*4882a593Smuzhiyun xfs_alert(mp, "%s: Superblock update failed!",
1473*4882a593Smuzhiyun __func__);
1474*4882a593Smuzhiyun }
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun if (error) {
1478*4882a593Smuzhiyun xfs_warn(mp, "Failed to initialize disk quotas.");
1479*4882a593Smuzhiyun return;
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun /*
1484*4882a593Smuzhiyun * This is called after the superblock has been read in and we're ready to
1485*4882a593Smuzhiyun * iget the quota inodes.
1486*4882a593Smuzhiyun */
1487*4882a593Smuzhiyun STATIC int
1488*4882a593Smuzhiyun xfs_qm_init_quotainos(
1489*4882a593Smuzhiyun xfs_mount_t *mp)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun struct xfs_inode *uip = NULL;
1492*4882a593Smuzhiyun struct xfs_inode *gip = NULL;
1493*4882a593Smuzhiyun struct xfs_inode *pip = NULL;
1494*4882a593Smuzhiyun int error;
1495*4882a593Smuzhiyun uint flags = 0;
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun ASSERT(mp->m_quotainfo);
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun /*
1500*4882a593Smuzhiyun * Get the uquota, gquota and pquota inodes
1501*4882a593Smuzhiyun */
1502*4882a593Smuzhiyun if (xfs_sb_version_hasquota(&mp->m_sb)) {
1503*4882a593Smuzhiyun if (XFS_IS_UQUOTA_ON(mp) &&
1504*4882a593Smuzhiyun mp->m_sb.sb_uquotino != NULLFSINO) {
1505*4882a593Smuzhiyun ASSERT(mp->m_sb.sb_uquotino > 0);
1506*4882a593Smuzhiyun error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1507*4882a593Smuzhiyun 0, 0, &uip);
1508*4882a593Smuzhiyun if (error)
1509*4882a593Smuzhiyun return error;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun if (XFS_IS_GQUOTA_ON(mp) &&
1512*4882a593Smuzhiyun mp->m_sb.sb_gquotino != NULLFSINO) {
1513*4882a593Smuzhiyun ASSERT(mp->m_sb.sb_gquotino > 0);
1514*4882a593Smuzhiyun error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1515*4882a593Smuzhiyun 0, 0, &gip);
1516*4882a593Smuzhiyun if (error)
1517*4882a593Smuzhiyun goto error_rele;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun if (XFS_IS_PQUOTA_ON(mp) &&
1520*4882a593Smuzhiyun mp->m_sb.sb_pquotino != NULLFSINO) {
1521*4882a593Smuzhiyun ASSERT(mp->m_sb.sb_pquotino > 0);
1522*4882a593Smuzhiyun error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1523*4882a593Smuzhiyun 0, 0, &pip);
1524*4882a593Smuzhiyun if (error)
1525*4882a593Smuzhiyun goto error_rele;
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun } else {
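		/*
		 * The on-disk superblock doesn't carry the quota feature bit
		 * yet; tell the first qino_alloc below to update the
		 * superblock version while creating the quota inode.
		 */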
1528*4882a593Smuzhiyun flags |= XFS_QMOPT_SBVERSION;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun /*
1532*4882a593Smuzhiyun * Create the three inodes, if they don't exist already. The changes
1533*4882a593Smuzhiyun * made above will get added to a transaction and logged in one of
1534*4882a593Smuzhiyun * the qino_alloc calls below. If the device is readonly,
1535*4882a593Smuzhiyun * temporarily switch to read-write to do this.
1536*4882a593Smuzhiyun */
1537*4882a593Smuzhiyun if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1538*4882a593Smuzhiyun error = xfs_qm_qino_alloc(mp, &uip,
1539*4882a593Smuzhiyun flags | XFS_QMOPT_UQUOTA);
1540*4882a593Smuzhiyun if (error)
1541*4882a593Smuzhiyun goto error_rele;
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun flags &= ~XFS_QMOPT_SBVERSION;
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1546*4882a593Smuzhiyun error = xfs_qm_qino_alloc(mp, &gip,
1547*4882a593Smuzhiyun flags | XFS_QMOPT_GQUOTA);
1548*4882a593Smuzhiyun if (error)
1549*4882a593Smuzhiyun goto error_rele;
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun flags &= ~XFS_QMOPT_SBVERSION;
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1554*4882a593Smuzhiyun error = xfs_qm_qino_alloc(mp, &pip,
1555*4882a593Smuzhiyun flags | XFS_QMOPT_PQUOTA);
1556*4882a593Smuzhiyun if (error)
1557*4882a593Smuzhiyun goto error_rele;
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun mp->m_quotainfo->qi_uquotaip = uip;
1561*4882a593Smuzhiyun mp->m_quotainfo->qi_gquotaip = gip;
1562*4882a593Smuzhiyun mp->m_quotainfo->qi_pquotaip = pip;
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun return 0;
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun error_rele:
1567*4882a593Smuzhiyun if (uip)
1568*4882a593Smuzhiyun xfs_irele(uip);
1569*4882a593Smuzhiyun if (gip)
1570*4882a593Smuzhiyun xfs_irele(gip);
1571*4882a593Smuzhiyun if (pip)
1572*4882a593Smuzhiyun xfs_irele(pip);
1573*4882a593Smuzhiyun return error;
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun STATIC void
1577*4882a593Smuzhiyun xfs_qm_destroy_quotainos(
1578*4882a593Smuzhiyun struct xfs_quotainfo *qi)
1579*4882a593Smuzhiyun {
1580*4882a593Smuzhiyun if (qi->qi_uquotaip) {
1581*4882a593Smuzhiyun xfs_irele(qi->qi_uquotaip);
1582*4882a593Smuzhiyun qi->qi_uquotaip = NULL; /* paranoia */
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun if (qi->qi_gquotaip) {
1585*4882a593Smuzhiyun xfs_irele(qi->qi_gquotaip);
1586*4882a593Smuzhiyun qi->qi_gquotaip = NULL;
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun if (qi->qi_pquotaip) {
1589*4882a593Smuzhiyun xfs_irele(qi->qi_pquotaip);
1590*4882a593Smuzhiyun qi->qi_pquotaip = NULL;
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun }
1593*4882a593Smuzhiyun
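/*
 * Remove a dquot from the per-mount radix tree, update the dquot count and
 * free the in-core structure.
 */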
1594*4882a593Smuzhiyun STATIC void
1595*4882a593Smuzhiyun xfs_qm_dqfree_one(
1596*4882a593Smuzhiyun struct xfs_dquot *dqp)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun struct xfs_mount *mp = dqp->q_mount;
1599*4882a593Smuzhiyun struct xfs_quotainfo *qi = mp->m_quotainfo;
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun mutex_lock(&qi->qi_tree_lock);
1602*4882a593Smuzhiyun radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun qi->qi_dquots--;
1605*4882a593Smuzhiyun mutex_unlock(&qi->qi_tree_lock);
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun xfs_qm_dqdestroy(dqp);
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun /* --------------- utility functions for vnodeops ---------------- */
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun /*
1614*4882a593Smuzhiyun * Given an inode, a uid, gid and prid, make sure that we have
1615*4882a593Smuzhiyun * allocated the relevant dquot(s) on disk, and that we won't exceed inode
1616*4882a593Smuzhiyun * quotas by creating this file.
1617*4882a593Smuzhiyun * This also attaches dquot(s) to the given inode after locking it,
1618*4882a593Smuzhiyun * and returns the dquots corresponding to the uid and/or gid.
1619*4882a593Smuzhiyun *
1620*4882a593Smuzhiyun * in : inode (unlocked)
1621*4882a593Smuzhiyun * out : udquot, gdquot with references taken and unlocked
1622*4882a593Smuzhiyun */
1623*4882a593Smuzhiyun int
1624*4882a593Smuzhiyun xfs_qm_vop_dqalloc(
1625*4882a593Smuzhiyun struct xfs_inode *ip,
1626*4882a593Smuzhiyun kuid_t uid,
1627*4882a593Smuzhiyun kgid_t gid,
1628*4882a593Smuzhiyun prid_t prid,
1629*4882a593Smuzhiyun uint flags,
1630*4882a593Smuzhiyun struct xfs_dquot **O_udqpp,
1631*4882a593Smuzhiyun struct xfs_dquot **O_gdqpp,
1632*4882a593Smuzhiyun struct xfs_dquot **O_pdqpp)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun struct xfs_mount *mp = ip->i_mount;
1635*4882a593Smuzhiyun struct inode *inode = VFS_I(ip);
1636*4882a593Smuzhiyun struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1637*4882a593Smuzhiyun struct xfs_dquot *uq = NULL;
1638*4882a593Smuzhiyun struct xfs_dquot *gq = NULL;
1639*4882a593Smuzhiyun struct xfs_dquot *pq = NULL;
1640*4882a593Smuzhiyun int error;
1641*4882a593Smuzhiyun uint lockflags;
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1644*4882a593Smuzhiyun return 0;
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun lockflags = XFS_ILOCK_EXCL;
1647*4882a593Smuzhiyun xfs_ilock(ip, lockflags);
1648*4882a593Smuzhiyun
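	/*
	 * If group ownership will be inherited from this inode (setgid
	 * directory / grpid semantics), charge quota against the inherited
	 * gid rather than the one passed in.
	 */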
1649*4882a593Smuzhiyun if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1650*4882a593Smuzhiyun gid = inode->i_gid;
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun /*
1653*4882a593Smuzhiyun * Attach the dquot(s) to this inode, doing a dquot allocation
1654*4882a593Smuzhiyun * if necessary. The dquot(s) will not be locked.
1655*4882a593Smuzhiyun */
1656*4882a593Smuzhiyun if (XFS_NOT_DQATTACHED(mp, ip)) {
1657*4882a593Smuzhiyun error = xfs_qm_dqattach_locked(ip, true);
1658*4882a593Smuzhiyun if (error) {
1659*4882a593Smuzhiyun xfs_iunlock(ip, lockflags);
1660*4882a593Smuzhiyun return error;
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1665*4882a593Smuzhiyun ASSERT(O_udqpp);
1666*4882a593Smuzhiyun if (!uid_eq(inode->i_uid, uid)) {
1667*4882a593Smuzhiyun /*
1668*4882a593Smuzhiyun * What we need is the dquot that has this uid, and
1669*4882a593Smuzhiyun * if we send the inode to dqget, the uid of the inode
1670*4882a593Smuzhiyun * takes priority over what's sent in the uid argument.
1671*4882a593Smuzhiyun * We must unlock inode here before calling dqget if
1672*4882a593Smuzhiyun * we're not sending the inode, because otherwise
1673*4882a593Smuzhiyun * we'll deadlock by doing trans_reserve while
1674*4882a593Smuzhiyun * holding ilock.
1675*4882a593Smuzhiyun */
1676*4882a593Smuzhiyun xfs_iunlock(ip, lockflags);
1677*4882a593Smuzhiyun error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1678*4882a593Smuzhiyun XFS_DQTYPE_USER, true, &uq);
1679*4882a593Smuzhiyun if (error) {
1680*4882a593Smuzhiyun ASSERT(error != -ENOENT);
1681*4882a593Smuzhiyun return error;
1682*4882a593Smuzhiyun }
1683*4882a593Smuzhiyun /*
1684*4882a593Smuzhiyun * Get the ilock in the right order.
1685*4882a593Smuzhiyun */
1686*4882a593Smuzhiyun xfs_dqunlock(uq);
1687*4882a593Smuzhiyun lockflags = XFS_ILOCK_SHARED;
1688*4882a593Smuzhiyun xfs_ilock(ip, lockflags);
1689*4882a593Smuzhiyun } else {
1690*4882a593Smuzhiyun /*
1691*4882a593Smuzhiyun * Take an extra reference, because we'll return
1692*4882a593Smuzhiyun * this to the caller
1693*4882a593Smuzhiyun */
1694*4882a593Smuzhiyun ASSERT(ip->i_udquot);
1695*4882a593Smuzhiyun uq = xfs_qm_dqhold(ip->i_udquot);
1696*4882a593Smuzhiyun }
1697*4882a593Smuzhiyun }
1698*4882a593Smuzhiyun if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1699*4882a593Smuzhiyun ASSERT(O_gdqpp);
1700*4882a593Smuzhiyun if (!gid_eq(inode->i_gid, gid)) {
1701*4882a593Smuzhiyun xfs_iunlock(ip, lockflags);
1702*4882a593Smuzhiyun error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1703*4882a593Smuzhiyun XFS_DQTYPE_GROUP, true, &gq);
1704*4882a593Smuzhiyun if (error) {
1705*4882a593Smuzhiyun ASSERT(error != -ENOENT);
1706*4882a593Smuzhiyun goto error_rele;
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun xfs_dqunlock(gq);
1709*4882a593Smuzhiyun lockflags = XFS_ILOCK_SHARED;
1710*4882a593Smuzhiyun xfs_ilock(ip, lockflags);
1711*4882a593Smuzhiyun } else {
1712*4882a593Smuzhiyun ASSERT(ip->i_gdquot);
1713*4882a593Smuzhiyun gq = xfs_qm_dqhold(ip->i_gdquot);
1714*4882a593Smuzhiyun }
1715*4882a593Smuzhiyun }
1716*4882a593Smuzhiyun if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1717*4882a593Smuzhiyun ASSERT(O_pdqpp);
1718*4882a593Smuzhiyun if (ip->i_d.di_projid != prid) {
1719*4882a593Smuzhiyun xfs_iunlock(ip, lockflags);
1720*4882a593Smuzhiyun error = xfs_qm_dqget(mp, prid,
1721*4882a593Smuzhiyun XFS_DQTYPE_PROJ, true, &pq);
1722*4882a593Smuzhiyun if (error) {
1723*4882a593Smuzhiyun ASSERT(error != -ENOENT);
1724*4882a593Smuzhiyun goto error_rele;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun xfs_dqunlock(pq);
1727*4882a593Smuzhiyun lockflags = XFS_ILOCK_SHARED;
1728*4882a593Smuzhiyun xfs_ilock(ip, lockflags);
1729*4882a593Smuzhiyun } else {
1730*4882a593Smuzhiyun ASSERT(ip->i_pdquot);
1731*4882a593Smuzhiyun pq = xfs_qm_dqhold(ip->i_pdquot);
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun trace_xfs_dquot_dqalloc(ip);
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun xfs_iunlock(ip, lockflags);
1737*4882a593Smuzhiyun if (O_udqpp)
1738*4882a593Smuzhiyun *O_udqpp = uq;
1739*4882a593Smuzhiyun else
1740*4882a593Smuzhiyun xfs_qm_dqrele(uq);
1741*4882a593Smuzhiyun if (O_gdqpp)
1742*4882a593Smuzhiyun *O_gdqpp = gq;
1743*4882a593Smuzhiyun else
1744*4882a593Smuzhiyun xfs_qm_dqrele(gq);
1745*4882a593Smuzhiyun if (O_pdqpp)
1746*4882a593Smuzhiyun *O_pdqpp = pq;
1747*4882a593Smuzhiyun else
1748*4882a593Smuzhiyun xfs_qm_dqrele(pq);
1749*4882a593Smuzhiyun return 0;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun error_rele:
1752*4882a593Smuzhiyun xfs_qm_dqrele(gq);
1753*4882a593Smuzhiyun xfs_qm_dqrele(uq);
1754*4882a593Smuzhiyun return error;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun
1757*4882a593Smuzhiyun /*
1758*4882a593Smuzhiyun * Actually transfer ownership, and do dquot modifications.
1759*4882a593Smuzhiyun * These were already reserved.
1760*4882a593Smuzhiyun */
1761*4882a593Smuzhiyun struct xfs_dquot *
1762*4882a593Smuzhiyun xfs_qm_vop_chown(
1763*4882a593Smuzhiyun struct xfs_trans *tp,
1764*4882a593Smuzhiyun struct xfs_inode *ip,
1765*4882a593Smuzhiyun struct xfs_dquot **IO_olddq,
1766*4882a593Smuzhiyun struct xfs_dquot *newdq)
1767*4882a593Smuzhiyun {
1768*4882a593Smuzhiyun struct xfs_dquot *prevdq;
1769*4882a593Smuzhiyun uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1770*4882a593Smuzhiyun XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1771*4882a593Smuzhiyun
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyun ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1774*4882a593Smuzhiyun ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1775*4882a593Smuzhiyun
1776*4882a593Smuzhiyun /* old dquot */
1777*4882a593Smuzhiyun prevdq = *IO_olddq;
1778*4882a593Smuzhiyun ASSERT(prevdq);
1779*4882a593Smuzhiyun ASSERT(prevdq != newdq);
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1782*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun /* the sparkling new dquot */
1785*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1786*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun /*
1789*4882a593Smuzhiyun * Back when we made quota reservations for the chown, we reserved the
1790*4882a593Smuzhiyun * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1791*4882a593Smuzhiyun * switched the dquots, decrease the new dquot's block reservation
1792*4882a593Smuzhiyun * (having already bumped up the real counter) so that we don't have
1793*4882a593Smuzhiyun * any reservation to give back when we commit.
1794*4882a593Smuzhiyun */
1795*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1796*4882a593Smuzhiyun -ip->i_delayed_blks);
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun /*
1799*4882a593Smuzhiyun * Give the incore reservation for delalloc blocks back to the old
1800*4882a593Smuzhiyun * dquot. We don't normally handle delalloc quota reservations
1801*4882a593Smuzhiyun * transactionally, so just lock the dquot and subtract from the
1802*4882a593Smuzhiyun * reservation. Dirty the transaction because it's too late to turn
1803*4882a593Smuzhiyun * back now.
1804*4882a593Smuzhiyun */
1805*4882a593Smuzhiyun tp->t_flags |= XFS_TRANS_DIRTY;
1806*4882a593Smuzhiyun xfs_dqlock(prevdq);
1807*4882a593Smuzhiyun ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1808*4882a593Smuzhiyun prevdq->q_blk.reserved -= ip->i_delayed_blks;
1809*4882a593Smuzhiyun xfs_dqunlock(prevdq);
1810*4882a593Smuzhiyun
1811*4882a593Smuzhiyun /*
1812*4882a593Smuzhiyun * Take an extra reference, because the inode is going to keep
1813*4882a593Smuzhiyun * this dquot pointer even after the trans_commit.
1814*4882a593Smuzhiyun */
1815*4882a593Smuzhiyun *IO_olddq = xfs_qm_dqhold(newdq);
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun return prevdq;
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun /*
1821*4882a593Smuzhiyun * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1822*4882a593Smuzhiyun */
1823*4882a593Smuzhiyun int
1824*4882a593Smuzhiyun xfs_qm_vop_chown_reserve(
1825*4882a593Smuzhiyun struct xfs_trans *tp,
1826*4882a593Smuzhiyun struct xfs_inode *ip,
1827*4882a593Smuzhiyun struct xfs_dquot *udqp,
1828*4882a593Smuzhiyun struct xfs_dquot *gdqp,
1829*4882a593Smuzhiyun struct xfs_dquot *pdqp,
1830*4882a593Smuzhiyun uint flags)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun struct xfs_mount *mp = ip->i_mount;
1833*4882a593Smuzhiyun unsigned int blkflags;
1834*4882a593Smuzhiyun struct xfs_dquot *udq_delblks = NULL;
1835*4882a593Smuzhiyun struct xfs_dquot *gdq_delblks = NULL;
1836*4882a593Smuzhiyun struct xfs_dquot *pdq_delblks = NULL;
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1839*4882a593Smuzhiyun ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun blkflags = XFS_IS_REALTIME_INODE(ip) ?
1842*4882a593Smuzhiyun XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1845*4882a593Smuzhiyun i_uid_read(VFS_I(ip)) != udqp->q_id)
1846*4882a593Smuzhiyun udq_delblks = udqp;
1847*4882a593Smuzhiyun
1848*4882a593Smuzhiyun if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1849*4882a593Smuzhiyun i_gid_read(VFS_I(ip)) != gdqp->q_id)
1850*4882a593Smuzhiyun gdq_delblks = gdqp;
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1853*4882a593Smuzhiyun ip->i_d.di_projid != pdqp->q_id)
1854*4882a593Smuzhiyun pdq_delblks = pdqp;
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun /*
1857*4882a593Smuzhiyun * Reserve enough quota to handle blocks on disk and reserved for a
1858*4882a593Smuzhiyun * delayed allocation. We'll actually transfer the delalloc
1859*4882a593Smuzhiyun * reservation between dquots at chown time, even though that part is
1860*4882a593Smuzhiyun * only semi-transactional.
1861*4882a593Smuzhiyun */
1862*4882a593Smuzhiyun return xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, udq_delblks,
1863*4882a593Smuzhiyun gdq_delblks, pdq_delblks,
1864*4882a593Smuzhiyun ip->i_d.di_nblocks + ip->i_delayed_blks,
1865*4882a593Smuzhiyun 1, blkflags | flags);
1866*4882a593Smuzhiyun }
1867*4882a593Smuzhiyun
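/*
 * Attach dquots to every distinct inode involved in a rename (at most four
 * entries in i_tab) before the rename transaction modifies them.
 */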
1868*4882a593Smuzhiyun int
1869*4882a593Smuzhiyun xfs_qm_vop_rename_dqattach(
1870*4882a593Smuzhiyun struct xfs_inode **i_tab)
1871*4882a593Smuzhiyun {
1872*4882a593Smuzhiyun struct xfs_mount *mp = i_tab[0]->i_mount;
1873*4882a593Smuzhiyun int i;
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1876*4882a593Smuzhiyun return 0;
1877*4882a593Smuzhiyun
1878*4882a593Smuzhiyun for (i = 0; (i < 4 && i_tab[i]); i++) {
1879*4882a593Smuzhiyun struct xfs_inode *ip = i_tab[i];
1880*4882a593Smuzhiyun int error;
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun /*
1883*4882a593Smuzhiyun * Watch out for duplicate entries in the table.
1884*4882a593Smuzhiyun */
1885*4882a593Smuzhiyun if (i == 0 || ip != i_tab[i-1]) {
1886*4882a593Smuzhiyun if (XFS_NOT_DQATTACHED(mp, ip)) {
1887*4882a593Smuzhiyun error = xfs_qm_dqattach(ip);
1888*4882a593Smuzhiyun if (error)
1889*4882a593Smuzhiyun return error;
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun }
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun return 0;
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun
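/*
 * Attach the dquots that were reserved for a newly created inode and charge
 * one inode against each enabled quota type.
 */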
1896*4882a593Smuzhiyun void
1897*4882a593Smuzhiyun xfs_qm_vop_create_dqattach(
1898*4882a593Smuzhiyun struct xfs_trans *tp,
1899*4882a593Smuzhiyun struct xfs_inode *ip,
1900*4882a593Smuzhiyun struct xfs_dquot *udqp,
1901*4882a593Smuzhiyun struct xfs_dquot *gdqp,
1902*4882a593Smuzhiyun struct xfs_dquot *pdqp)
1903*4882a593Smuzhiyun {
1904*4882a593Smuzhiyun struct xfs_mount *mp = tp->t_mountp;
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1907*4882a593Smuzhiyun return;
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1912*4882a593Smuzhiyun ASSERT(ip->i_udquot == NULL);
1913*4882a593Smuzhiyun ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun ip->i_udquot = xfs_qm_dqhold(udqp);
1916*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1919*4882a593Smuzhiyun ASSERT(ip->i_gdquot == NULL);
1920*4882a593Smuzhiyun ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun ip->i_gdquot = xfs_qm_dqhold(gdqp);
1923*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1926*4882a593Smuzhiyun ASSERT(ip->i_pdquot == NULL);
1927*4882a593Smuzhiyun ASSERT(ip->i_d.di_projid == pdqp->q_id);
1928*4882a593Smuzhiyun
1929*4882a593Smuzhiyun ip->i_pdquot = xfs_qm_dqhold(pdqp);
1930*4882a593Smuzhiyun xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
1931*4882a593Smuzhiyun }
1932*4882a593Smuzhiyun }
1933*4882a593Smuzhiyun