// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

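/*
 * Lockdep classes for the dquot q_qlock: xfs_dquot_alloc() gives group and
 * project dquots their own lock classes so that lockdep can tell user and
 * group/project dquot locks apart when both are held (see xfs_dqlock2()).
 */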
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_qm_dqzone, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(q, xfs_dquot_type(dq));

	if (!dq->q_blk.softlimit) {
		dq->q_blk.softlimit = defq->blk.soft;
		prealloc = 1;
	}
	if (!dq->q_blk.hardlimit) {
		dq->q_blk.hardlimit = defq->blk.hard;
		prealloc = 1;
	}
	if (!dq->q_ino.softlimit)
		dq->q_ino.softlimit = defq->ino.soft;
	if (!dq->q_ino.hardlimit)
		dq->q_ino.hardlimit = defq->ino.hard;
	if (!dq->q_rtb.softlimit)
		dq->q_rtb.softlimit = defq->rtb.soft;
	if (!dq->q_rtb.hardlimit)
		dq->q_rtb.hardlimit = defq->rtb.hard;

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
	struct xfs_mount	*mp,
	time64_t		timeout)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	return clamp_t(time64_t, timeout, qi->qi_expiry_min,
			qi->qi_expiry_max);
}

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
	time64_t		grace)
{
	return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

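	/*
	 * Over either limit: start the grace timer if one isn't already
	 * running. Back under both limits: clear a running timer, or, if
	 * no timer is running, clear any leftover warning count.
	 */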
	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		if (res->timer == 0)
			res->warnings = 0;
		else
			res->timer = 0;
	}
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
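	/* e.g. (hypothetical) q->qi_dqperchunk == 30: id 65 -> curid 60 */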
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		if (curid > 0 && xfs_sb_version_hasbigtime(&mp->m_sb))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but uninitialized dquot
	 * blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
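	/*
	 * Worked example (hypothetical numbers): a hard limit of 1000 blocks
	 * with no soft limit gives lo_wmark = 950 and q_low_space =
	 * {10, 30, 50}, i.e. the 1%/3%/5% thresholds used when throttling
	 * speculative preallocation as the quota fills up.
	 */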
}

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_trans	**tpp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp = *tpp;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this type of quota is turned off while we didn't
		 * have the inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	/* Create the block mapping. */
	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		return error;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		return error;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller. The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call. We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot. If the _defer_finish fails, the old
	 * transaction is gone but the new buffer is not joined or held to any
	 * transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction. The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction. On error, the buffer is
	 * released and not passed back.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_defer_finish(tpp);
	if (error) {
		xfs_trans_bhold_release(*tpp, bp);
		xfs_trans_brelse(*tpp, bp);
		return error;
	}
	*bpp = bp;
	return 0;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this type of quota is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(xfs_dqblk_t);
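	/*
	 * For example (hypothetical numbers): with qi_dqperchunk == 30,
	 * id 65 lands in quota file block 2, at buffer offset
	 * 5 * sizeof(xfs_dqblk_t) within that chunk.
	 */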

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* uses the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Check the ondisk dquot's id and type match what the incore dquot expects. */
static bool
xfs_dquot_check_type(
	struct xfs_dquot	*dqp,
	struct xfs_disk_dquot	*ddqp)
{
	uint8_t			ddqp_type;
	uint8_t			dqp_type;

	ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
	dqp_type = xfs_dquot_type(dqp);

	if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
		return false;

	/*
	 * V5 filesystems always expect an exact type match. V4 filesystems
	 * expect an exact match for user dquots and for non-root group and
	 * project dquots.
	 */
	if (xfs_sb_version_hascrc(&dqp->q_mount->m_sb) ||
	    dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
		return ddqp_type == dqp_type;

	/*
	 * V4 filesystems support either group or project quotas, but not both
	 * at the same time. The non-user quota file can be switched between
	 * group and project quota uses depending on the mount options, which
	 * means that we can encounter the other type when we try to load quota
	 * defaults. Quotacheck will soon reset the entire quota file
	 * (including the root dquot) anyway, but don't log scary corruption
	 * reports to dmesg.
	 */
	return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
}

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if (!xfs_dquot_check_type(dqp, ddqp)) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns);
	dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns);
	dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns);

	dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
	dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
	dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

	ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings);
	ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings);
	ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings);

	ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
	ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
	ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}

/* Allocate and initialize the dquot buffer for this in-core dquot. */
static int
xfs_qm_dqread_alloc(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		goto err;

	error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error) {
		/*
		 * Buffer was held to the transaction, so we have to unlock it
		 * manually here because we're not passing it back.
		 */
		xfs_buf_relse(*bpp);
		*bpp = NULL;
		goto err;
	}
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err:
	return error;
}

/*
 * Read in the ondisk dquot using xfs_dquot_disk_read(), copy it to an
 * incore version, and release the buffer immediately. If @can_alloc is
 * true, fill any holes in the on-disk metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_qm_dqread_alloc(mp, dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer. Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

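	/*
	 * 'start' is the quota file offset (in filesystem blocks) of the
	 * chunk that would hold next_id; the extent lookup below finds the
	 * first allocated chunk at or after that offset.
	 */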
	lock_flags = xfs_ilock_data_map_shared(quotip);
	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}

/*
 * Look up the dquot in the in-core cache. If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
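		/*
		 * Another thread is tearing this dquot down; back off
		 * briefly and retry the lookup from the top.
		 */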
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache. If an error occurs the
 * caller should throw away the dquot and start over. Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found! Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
		return -ESRCH;

	switch (type) {
	case XFS_DQTYPE_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
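
/*
 * Typical calling pattern (illustrative sketch, not part of the original
 * file): the dquot comes back locked with a reference held, and
 * xfs_qm_dqput() drops both.
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, true, &dqp);
 *	if (error)
 *		return error;
 *	// ... examine or modify dqp while holding its lock ...
 *	xfs_qm_dqput(dqp);
 */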

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata. This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	return xfs_qm_dqread(mp, id, type, 0, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return i_uid_read(VFS_I(ip));
	case XFS_DQTYPE_GROUP:
		return i_gid_read(VFS_I(ip));
	case XFS_DQTYPE_PROJ:
		return ip->i_d.di_projid;
	}
	ASSERT(0);
	return 0;
}

/*
 * Return the dquot for a given inode and type. If @can_alloc is true, then
 * allocate blocks if needed. The inode's ILOCK must be held and it must not
 * have already had an inode attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	int			error = 0;

	*dqpp = NULL;
	for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
		error = xfs_qm_dqget(mp, id, type, false, &dqp);
		if (error == -ENOENT)
			continue;
		else if (error != 0)
			break;

		if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			*dqpp = dqp;
			return 0;
		}

		xfs_qm_dqput(dqp);
	}

	return error;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

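		/*
		 * That was the last reference: park the dquot on the LRU so
		 * the shrinker can reclaim it (it stays in the cache tree,
		 * so a later dqget can still find and reuse it).
		 */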
		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	struct xfs_dquot	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
1129*4882a593Smuzhiyun */
1130*4882a593Smuzhiyun static void
xfs_qm_dqflush_done(struct xfs_log_item * lip)1131*4882a593Smuzhiyun xfs_qm_dqflush_done(
1132*4882a593Smuzhiyun struct xfs_log_item *lip)
1133*4882a593Smuzhiyun {
1134*4882a593Smuzhiyun struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
1135*4882a593Smuzhiyun struct xfs_dquot *dqp = qip->qli_dquot;
1136*4882a593Smuzhiyun struct xfs_ail *ailp = lip->li_ailp;
1137*4882a593Smuzhiyun xfs_lsn_t tail_lsn;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun /*
1140*4882a593Smuzhiyun * We only want to pull the item from the AIL if its
1141*4882a593Smuzhiyun * location in the log has not changed since we started the flush.
1142*4882a593Smuzhiyun * Thus, we only bother if the dquot's lsn has
1143*4882a593Smuzhiyun * not changed. First we check the lsn outside the lock
1144*4882a593Smuzhiyun * since it's cheaper, and then we recheck while
1145*4882a593Smuzhiyun * holding the lock before removing the dquot from the AIL.
1146*4882a593Smuzhiyun */
1147*4882a593Smuzhiyun if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
1148*4882a593Smuzhiyun ((lip->li_lsn == qip->qli_flush_lsn) ||
1149*4882a593Smuzhiyun test_bit(XFS_LI_FAILED, &lip->li_flags))) {
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun spin_lock(&ailp->ail_lock);
1152*4882a593Smuzhiyun xfs_clear_li_failed(lip);
1153*4882a593Smuzhiyun if (lip->li_lsn == qip->qli_flush_lsn) {
1154*4882a593Smuzhiyun /* xfs_ail_update_finish() drops the AIL lock */
1155*4882a593Smuzhiyun tail_lsn = xfs_ail_delete_one(ailp, lip);
1156*4882a593Smuzhiyun xfs_ail_update_finish(ailp, tail_lsn);
1157*4882a593Smuzhiyun } else {
1158*4882a593Smuzhiyun spin_unlock(&ailp->ail_lock);
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun }
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun /*
1163*4882a593Smuzhiyun * Release the dq's flush lock since we're done with it.
1164*4882a593Smuzhiyun */
1165*4882a593Smuzhiyun xfs_dqfunlock(dqp);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun
void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		list_del_init(&lip->li_bio_list);
		xfs_qm_dqflush_done(lip);
	}
}

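/*
 * Buffer write failure handler for dquot buffers: mark every attached
 * dquot log item failed under the AIL lock so that writeback can be
 * retried later instead of leaving the items stuck in the AIL.
 */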
void
xfs_buf_dquot_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	spin_lock(&bp->b_mount->m_ail->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_set_li_failed(lip, bp);
	spin_unlock(&bp->b_mount->m_ail->ail_lock);
}

/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
	struct xfs_dquot	*dqp)
{
	xfs_dqtype_t		type = xfs_dquot_type(dqp);

	if (type != XFS_DQTYPE_USER &&
	    type != XFS_DQTYPE_GROUP &&
	    type != XFS_DQTYPE_PROJ)
		return __this_address;

	if (dqp->q_id == 0)
		return NULL;

	if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
	    !dqp->q_blk.timer)
		return __this_address;

	if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
	    !dqp->q_ino.timer)
		return __this_address;

	if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
	    !dqp->q_rtb.timer)
		return __this_address;

	/*
	 * The bigtime flag requires the big timestamps feature and must
	 * never be set on root dquots.
	 */
	if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
		if (!xfs_sb_version_hasbigtime(&dqp->q_mount->m_sb))
			return __this_address;
		if (dqp->q_id == 0)
			return __this_address;
	}

	return NULL;
}

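/*
 * Worked example for the soft limit checks above (hypothetical values): a
 * user dquot with q_blk.softlimit = 100, q_blk.count = 150 and
 * q_blk.timer = 0 is inconsistent, because exceeding a soft limit must
 * always start the grace period timer.  xfs_qm_dqflush_check() flags such
 * a dquot, and xfs_qm_dqflush() then aborts with -EFSCORRUPTED instead of
 * writing the bad dquot to disk.
 */
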
/*
 * Write a modified dquot to disk.
 * The caller must hold the dquot lock and must already have taken the
 * flush lock.  The flush lock will not be unlocked until the dquot
 * reaches the disk, but the dquot is free to be unlocked and modified by
 * the caller in the interim.  The dquot is still locked on return.  This
 * behavior is identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		goto out_unlock;
	if (error)
		goto out_abort;

	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot to the ondisk buffer. */
	dqblk = bp->b_addr + dqp->q_bufoffset;
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-memory
	 * dquot here.  This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC.  This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot to the buffer so that we can remove this dquot from
	 * the AIL and release the flush lock once the dquot is synced to disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}

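/*
 * Illustrative caller sketch (an assumption modeled on the quota writeback
 * paths, not a verbatim caller from this file): the flush lock is taken
 * before calling xfs_qm_dqflush(), and the returned buffer is queued for
 * delayed write:
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 *	xfs_dqunlock(dqp);
 */
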
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the dquot with the lowest id first.
 */
void
xfs_dqlock2(
	struct xfs_dquot	*d1,
	struct xfs_dquot	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (d1->q_id > d2->q_id) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}

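/*
 * Illustrative sketch (hypothetical caller): code that must adjust two
 * dquots at once, e.g. the user and group dquot of an inode, takes both
 * locks through xfs_dqlock2() so the ordering rule above is respected,
 * and drops them individually afterwards:
 *
 *	xfs_dqlock2(udqp, gdqp);
 *	... modify both dquots ...
 *	xfs_dqunlock(udqp);
 *	xfs_dqunlock(gdqp);
 */
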
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
					  sizeof(struct xfs_dquot),
					  0, 0, NULL);
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
					     sizeof(struct xfs_dquot_acct),
					     0, 0, NULL);
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_cache_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_qm_dqtrxzone);
	kmem_cache_destroy(xfs_qm_dqzone);
}

/*
 * Iterate every dquot of a particular type.  The caller must ensure that the
 * particular quota type is active.  iter_fn can return negative error codes,
 * or -ECANCELED to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_qm_dqiterate_fn	iter_fn,
	void			*priv)
{
	struct xfs_dquot	*dq;
	xfs_dqid_t		id = 0;
	int			error;

	do {
		error = xfs_qm_dqget_next(mp, id, type, &dq);
		if (error == -ENOENT)
			return 0;
		if (error)
			return error;

		error = iter_fn(dq, type, priv);
		id = dq->q_id + 1;	/* wraps to 0 past the last id */
		xfs_qm_dqput(dq);
	} while (error == 0 && id != 0);

	return error;
}
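
/*
 * Illustrative sketch (hypothetical callback, not part of this file): an
 * iterator that counts dquots and stops early once it has seen enough,
 * using the -ECANCELED convention described above:
 *
 *	static int
 *	xfs_count_dquots(struct xfs_dquot *dq, xfs_dqtype_t type, void *priv)
 *	{
 *		unsigned int	*count = priv;
 *
 *		if (++(*count) >= 100)
 *			return -ECANCELED;
 *		return 0;
 *	}
 *
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, xfs_count_dquots,
 *			&count);
 */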