// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
        if (xfs_uuid_table_size == 0)
                return;
        kmem_free(xfs_uuid_table);
        xfs_uuid_table = NULL;
        xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     hole, i;

        /* Publish UUID in struct super_block */
        uuid_copy(&mp->m_super->s_uuid, uuid);

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return 0;

        if (uuid_is_null(uuid)) {
                xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
                if (uuid_equal(uuid, &xfs_uuid_table[i]))
                        goto out_duplicate;
        }

        if (hole < 0) {
                xfs_uuid_table = krealloc(xfs_uuid_table,
                        (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
                        GFP_KERNEL | __GFP_NOFAIL);
                hole = xfs_uuid_table_size++;
        }
        xfs_uuid_table[hole] = *uuid;
        mutex_unlock(&xfs_uuid_table_mutex);

        return 0;

out_duplicate:
        mutex_unlock(&xfs_uuid_table_mutex);
        xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
        return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     i;

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return;

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
                memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
                break;
        }
        ASSERT(i < xfs_uuid_table_size);
        mutex_unlock(&xfs_uuid_table_mutex);
}

STATIC void
__xfs_free_perag(
        struct rcu_head         *head)
{
        struct xfs_perag        *pag = container_of(head, struct xfs_perag, rcu_head);

        ASSERT(atomic_read(&pag->pag_ref) == 0);
        kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
        xfs_mount_t             *mp)
{
        xfs_agnumber_t          agno;
        struct xfs_perag        *pag;

        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                spin_lock(&mp->m_perag_lock);
                pag = radix_tree_delete(&mp->m_perag_tree, agno);
                spin_unlock(&mp->m_perag_lock);
                ASSERT(pag);
                ASSERT(atomic_read(&pag->pag_ref) == 0);
                xfs_iunlink_destroy(pag);
                xfs_buf_hash_destroy(pag);
                call_rcu(&pag->rcu_head, __xfs_free_perag);
        }
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
        xfs_sb_t        *sbp,
        uint64_t        nblocks)
{
        ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
        ASSERT(sbp->sb_blocklog >= BBSHIFT);

        /* Limited by ULONG_MAX of page cache index */
        if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
                return -EFBIG;
        return 0;
}
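
/*
 * Editor's illustration (hypothetical values, not from this file): with
 * PAGE_SHIFT = 12 and sb_blocklog = 12 (4k pages, 4k blocks) the shift is
 * zero and nblocks is compared against ULONG_MAX directly; with
 * sb_blocklog = 9 (512 byte blocks) each page covers 8 blocks, so block
 * counts up to roughly ULONG_MAX << 3 still fit in the unsigned long
 * page cache index that this check protects.
 */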

int
xfs_initialize_perag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agcount,
        xfs_agnumber_t  *maxagi)
{
        xfs_agnumber_t  index;
        xfs_agnumber_t  first_initialised = NULLAGNUMBER;
        xfs_perag_t     *pag;
        int             error = -ENOMEM;

        /*
         * Walk the current per-ag tree so we don't try to initialise AGs
         * that already exist (growfs case). Allocate and insert all the
         * AGs we don't find ready for initialisation.
         */
        for (index = 0; index < agcount; index++) {
                pag = xfs_perag_get(mp, index);
                if (pag) {
                        xfs_perag_put(pag);
                        continue;
                }

                pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
                if (!pag) {
                        error = -ENOMEM;
                        goto out_unwind_new_pags;
                }
                pag->pag_agno = index;
                pag->pag_mount = mp;
                spin_lock_init(&pag->pag_ici_lock);
                INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);

                error = xfs_buf_hash_init(pag);
                if (error)
                        goto out_free_pag;
                init_waitqueue_head(&pag->pagb_wait);
                spin_lock_init(&pag->pagb_lock);
                pag->pagb_count = 0;
                pag->pagb_tree = RB_ROOT;

                error = radix_tree_preload(GFP_NOFS);
                if (error)
                        goto out_hash_destroy;

                spin_lock(&mp->m_perag_lock);
                if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
                        WARN_ON_ONCE(1);
                        spin_unlock(&mp->m_perag_lock);
                        radix_tree_preload_end();
                        error = -EEXIST;
                        goto out_hash_destroy;
                }
                spin_unlock(&mp->m_perag_lock);
                radix_tree_preload_end();
                /* first new pag is fully initialized */
                if (first_initialised == NULLAGNUMBER)
                        first_initialised = index;
                error = xfs_iunlink_init(pag);
                if (error)
                        goto out_hash_destroy;
                spin_lock_init(&pag->pag_state_lock);
        }

        index = xfs_set_inode_alloc(mp, agcount);

        if (maxagi)
                *maxagi = index;

        mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
        return 0;

out_hash_destroy:
        xfs_buf_hash_destroy(pag);
out_free_pag:
        kmem_free(pag);
out_unwind_new_pags:
        /* unwind any prior newly initialized pags */
        for (index = first_initialised; index < agcount; index++) {
                pag = radix_tree_delete(&mp->m_perag_tree, index);
                if (!pag)
                        break;
                xfs_buf_hash_destroy(pag);
                xfs_iunlink_destroy(pag);
                kmem_free(pag);
        }
        return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
        struct xfs_mount *mp,
        int             flags)
{
        unsigned int    sector_size;
        struct xfs_buf  *bp;
        struct xfs_sb   *sbp = &mp->m_sb;
        int             error;
        int             loud = !(flags & XFS_MFSI_QUIET);
        const struct xfs_buf_ops *buf_ops;

        ASSERT(mp->m_sb_bp == NULL);
        ASSERT(mp->m_ddev_targp != NULL);

        /*
         * For the initial read, we must guess at the sector
         * size based on the block device. It's enough to
         * get the sb_sectsize out of the superblock and
         * then reread with the proper length.
         * We don't verify it yet, because it may not be complete.
         */
        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
        buf_ops = NULL;

        /*
         * Allocate a (locked) buffer to hold the superblock. This will be kept
         * around at all times to optimize access to the superblock. Therefore,
         * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
         * elevated.
         */
reread:
        error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
                                      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
                                      buf_ops);
        if (error) {
                if (loud)
                        xfs_warn(mp, "SB validate failed with error %d.", error);
                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        /*
         * Initialize the mount structure from the superblock.
         */
        xfs_sb_from_disk(sbp, bp->b_addr);

        /*
         * If we haven't validated the superblock, do so now before we try
         * to check the sector size and reread the superblock appropriately.
         */
        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
                if (loud)
                        xfs_warn(mp, "Invalid superblock magic number");
                error = -EINVAL;
                goto release_buf;
        }

        /*
         * We must be able to do sector-sized and sector-aligned IO.
         */
        if (sector_size > sbp->sb_sectsize) {
                if (loud)
                        xfs_warn(mp, "device supports %u byte sectors (not %u)",
                                sector_size, sbp->sb_sectsize);
                error = -ENOSYS;
                goto release_buf;
        }

        if (buf_ops == NULL) {
                /*
                 * Re-read the superblock so the buffer is correctly sized,
                 * and properly verified.
                 */
                xfs_buf_relse(bp);
                sector_size = sbp->sb_sectsize;
                buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
                goto reread;
        }

        xfs_reinit_percpu_counters(mp);

        /* no need to be quiet anymore, so reset the buf ops */
        bp->b_ops = &xfs_sb_buf_ops;

        mp->m_sb_bp = bp;
        xfs_buf_unlock(bp);
        return 0;

release_buf:
        xfs_buf_relse(bp);
        return error;
}

/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before. Returns true to update the sb, false otherwise.
 */
static inline int
xfs_check_new_dalign(
        struct xfs_mount        *mp,
        int                     new_dalign,
        bool                    *update_sb)
{
        struct xfs_sb           *sbp = &mp->m_sb;
        xfs_ino_t               calc_ino;

        calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
        trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

        if (sbp->sb_rootino == calc_ino) {
                *update_sb = true;
                return 0;
        }

        xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

        /*
         * XXX: Next time we add a new incompat feature, this should start
         * returning -EINVAL to fail the mount. Until then, spit out a warning
         * that we're ignoring the administrator's instructions.
         */
        xfs_warn(mp, "Skipping superblock stripe alignment update.");
        *update_sb = false;
        return 0;
}

/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects. This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
        struct xfs_mount        *mp)
{
        if (mp->m_dalign == 0)
                return 0;

        /*
         * If stripe unit and stripe width are not multiples
         * of the fs blocksize turn off alignment.
         */
        if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
            (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
                xfs_warn(mp,
        "alignment check failed: sunit/swidth vs. blocksize(%d)",
                        mp->m_sb.sb_blocksize);
                return -EINVAL;
        } else {
                /*
                 * Convert the stripe unit and width to FSBs.
                 */
                mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
                if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
                        xfs_warn(mp,
                "alignment check failed: sunit/swidth vs. agsize(%d)",
                                mp->m_sb.sb_agblocks);
                        return -EINVAL;
                } else if (mp->m_dalign) {
                        mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
                } else {
                        xfs_warn(mp,
                "alignment check failed: sunit(%d) less than bsize(%d)",
                                mp->m_dalign, mp->m_sb.sb_blocksize);
                        return -EINVAL;
                }
        }

        if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
                xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
                return -EINVAL;
        }

        return 0;
}

/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &mp->m_sb;

        if (mp->m_dalign) {
                bool            update_sb;
                int             error;

                if (sbp->sb_unit == mp->m_dalign &&
                    sbp->sb_width == mp->m_swidth)
                        return 0;

                error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
                if (error || !update_sb)
                        return error;

                sbp->sb_unit = mp->m_dalign;
                sbp->sb_width = mp->m_swidth;
                mp->m_update_sb = true;
        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
                   xfs_sb_version_hasdalign(&mp->m_sb)) {
                mp->m_dalign = sbp->sb_unit;
                mp->m_swidth = sbp->sb_width;
        }

        return 0;
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
        struct xfs_mount        *mp)
{
        int i;

        for (i = 0; i < XFS_LOWSP_MAX; i++) {
                uint64_t space = mp->m_sb.sb_dblocks;

                do_div(space, 100);
                mp->m_low_space[i] = space * (i + 1);
        }
}
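
/*
 * Worked example (editor's note, illustrative numbers only, assuming
 * XFS_LOWSP_MAX == 5): for sb_dblocks = 1,000,000 the loop above sets
 * m_low_space[0..4] to 10,000, 20,000, 30,000, 40,000 and 50,000 blocks,
 * i.e. thresholds at 1% through 5% of the data device.
 */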

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
        struct xfs_mount *mp)
{
        struct xfs_buf  *bp;
        xfs_daddr_t     d;
        int             error;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
                xfs_warn(mp, "filesystem size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_ddev_targp,
                                      d - XFS_FSS_TO_BB(mp, 1),
                                      XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "last sector read failed");
                return error;
        }
        xfs_buf_relse(bp);

        if (mp->m_logdev_targp == mp->m_ddev_targp)
                return 0;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                xfs_warn(mp, "log size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_logdev_targp,
                                      d - XFS_FSB_TO_BB(mp, 1),
                                      XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "log device read failed");
                return error;
        }
        xfs_buf_relse(bp);
        return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
        struct xfs_mount        *mp)
{
        mp->m_qflags = 0;

        /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
        if (mp->m_sb.sb_qflags == 0)
                return 0;
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = 0;
        spin_unlock(&mp->m_sb_lock);

        if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;

        return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
        uint64_t resblks;

        /*
         * We default to 5% or 8192 fsbs of space reserved, whichever is
         * smaller. This is intended to cover concurrent allocation
         * transactions when we initially hit enospc. These each require a 4
         * block reservation. Hence by default we cover roughly 2000 concurrent
         * allocation reservations.
         */
        resblks = mp->m_sb.sb_dblocks;
        do_div(resblks, 20);
        resblks = min_t(uint64_t, resblks, 8192);
        return resblks;
}
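
/*
 * Worked example (editor's note, illustrative only): a 1 TiB data device
 * with 4 KiB blocks has sb_dblocks = 268,435,456; one twentieth of that is
 * 13,421,772 blocks, which min_t() clamps to the 8192-block (32 MiB) cap.
 * Only filesystems smaller than about 640 MiB fall below the cap.
 */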

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
        struct xfs_mount        *mp)
{
        /*
         * The AG0 superblock verifier rejects in-progress filesystems,
         * so we should never see the flag set this far into mounting.
         */
        if (mp->m_sb.sb_inprogress) {
                xfs_err(mp, "sb_inprogress set after log recovery??");
                WARN_ON(1);
                return -EFSCORRUPTED;
        }

        /*
         * Now that the log is mounted, we know whether it was an unclean
         * shutdown or not. If it was, the first phase of recovery has
         * completed, so we have consistent AG blocks on disk. We have not
         * recovered EFIs yet, but they are recovered transactionally in the
         * second recovery phase later.
         *
         * If the log was clean when we mounted, we can check the summary
         * counters. If any of them are obviously incorrect, we can recompute
         * them from the AGF headers in the next step.
         */
        if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
            (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
             !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
             mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
                xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

        /*
         * We can safely re-initialise incore superblock counters from the
         * per-ag data. These may not be correct if the filesystem was not
         * cleanly unmounted, so we waited for recovery to finish before doing
         * this.
         *
         * If the filesystem was cleanly unmounted or the previous check did
         * not flag anything weird, then we can trust the values in the
         * superblock to be correct and we don't need to do anything here.
         * Otherwise, recalculate the summary counters.
         */
        if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
            !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
                return 0;

        return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}

/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background. This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
        struct xfs_mount        *mp)
{
        xfs_log_force(mp, XFS_LOG_SYNC);
        xfs_extent_busy_wait_all(mp);
        flush_workqueue(xfs_discard_wq);

        mp->m_flags |= XFS_MOUNT_UNMOUNTING;

        xfs_ail_push_all_sync(mp->m_ail);
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp);
        xfs_health_unmount(mp);
}

/*
 * This function does the following on an initial mount of a file system:
 *      - reads the superblock from disk and init the mount struct
 *      - if we're a 32-bit kernel, do a size check on the superblock
 *        so we don't mount terabyte filesystems
 *      - init mount struct realtime fields
 *      - allocate inode hash table for fs
 *      - init directory manager
 *      - perform recovery and init the log manager
 */
int
xfs_mountfs(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &(mp->m_sb);
        struct xfs_inode        *rip;
        struct xfs_ino_geometry *igeo = M_IGEO(mp);
        uint64_t                resblks;
        uint                    quotamount = 0;
        uint                    quotaflags = 0;
        int                     error = 0;

        xfs_sb_mount_common(mp, sbp);

        /*
         * Check for a mismatched features2 values. Older kernels read & wrote
         * into the wrong sb offset for sb_features2 on some platforms due to
         * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
         * which made older superblock reading/writing routines swap it as a
         * 64-bit value.
         *
         * For backwards compatibility, we make both slots equal.
         *
         * If we detect a mismatched field, we OR the set bits into the existing
         * features2 field in case it has already been modified; we don't want
         * to lose any features. We then update the bad location with the ORed
         * value so that older kernels will see any features2 flags. The
         * superblock writeback code ensures the new sb_features2 is copied to
         * sb_bad_features2 before it is logged or written to disk.
         */
        if (xfs_sb_has_mismatched_features2(sbp)) {
                xfs_warn(mp, "correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
                mp->m_update_sb = true;

                /*
                 * Re-check for ATTR2 in case it was found in bad_features2
                 * slot.
                 */
                if (xfs_sb_version_hasattr2(&mp->m_sb) &&
                    !(mp->m_flags & XFS_MOUNT_NOATTR2))
                        mp->m_flags |= XFS_MOUNT_ATTR2;
        }

        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_sb_version_removeattr2(&mp->m_sb);
                mp->m_update_sb = true;

                /* update sb_versionnum for the clearing of the morebits */
                if (!sbp->sb_features2)
                        mp->m_update_sb = true;
        }

        /* always use v2 inodes by default now */
        if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
                mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
                mp->m_update_sb = true;
        }

        /*
         * If we were given new sunit/swidth options, do some basic validation
         * checks and convert the incore dalign and swidth values to the
         * same units (FSB) that everything else uses. This /must/ happen
         * before computing the inode geometry.
         */
        error = xfs_validate_new_dalign(mp);
        if (error)
                goto out;

        xfs_alloc_compute_maxlevels(mp);
        xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
        xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
        xfs_ialloc_setup_geometry(mp);
        xfs_rmapbt_compute_maxlevels(mp);
        xfs_refcountbt_compute_maxlevels(mp);

        /*
         * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
         * is NOT aligned turn off m_dalign since allocator alignment is within
         * an ag, therefore ag has to be aligned at stripe boundary. Note that
         * we must compute the free space and rmap btree geometry before doing
         * this.
         */
        error = xfs_update_alignment(mp);
        if (error)
                goto out;

        /* enable fail_at_unmount as default */
        mp->m_fail_unmount = true;

        error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
                               NULL, mp->m_super->s_id);
        if (error)
                goto out;

        error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
                               &mp->m_kobj, "stats");
        if (error)
                goto out_remove_sysfs;

        error = xfs_error_sysfs_init(mp);
        if (error)
                goto out_del_stats;

        error = xfs_errortag_init(mp);
        if (error)
                goto out_remove_error_sysfs;

        error = xfs_uuid_mount(mp);
        if (error)
                goto out_remove_errortag;

        /*
         * Update the preferred write size based on the information from the
         * on-disk superblock.
         */
        mp->m_allocsize_log =
                max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
        mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);

        /* set the low space thresholds for dynamic preallocation */
        xfs_set_low_space_thresholds(mp);

        /*
         * If enabled, sparse inode chunk alignment is expected to match the
         * cluster size. Full inode chunk alignment must match the chunk size,
         * but that is checked on sb read verification...
         */
        if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
            mp->m_sb.sb_spino_align !=
                        XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
                xfs_warn(mp,
        "Sparse inode block alignment (%u) must match cluster size (%llu).",
                         mp->m_sb.sb_spino_align,
                         XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
                error = -EINVAL;
                goto out_remove_uuid;
        }

        /*
         * Check that the data (and log if separate) is an ok size.
         */
        error = xfs_check_sizes(mp);
        if (error)
                goto out_remove_uuid;

        /*
         * Initialize realtime fields in the mount structure
         */
        error = xfs_rtmount_init(mp);
        if (error) {
                xfs_warn(mp, "RT mount failed");
                goto out_remove_uuid;
        }

        /*
         * Copies the low order bits of the timestamp and the randomly
         * set "sequence" number out of a UUID.
         */
        mp->m_fixedfsid[0] =
                (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
                 get_unaligned_be16(&sbp->sb_uuid.b[4]);
        mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
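
        /*
         * Editor's note on the packing above (derived from the code itself,
         * not a spec reference): numbering the UUID bytes b[0..15], this
         * yields fsid[0] = b[8]<<24 | b[9]<<16 | b[4]<<8 | b[5] and
         * fsid[1] = the big-endian value of b[0..3], i.e. the clock
         * sequence, time_mid and time_low fields of a version 1 UUID.
         */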

        error = xfs_da_mount(mp);
        if (error) {
                xfs_warn(mp, "Failed dir/attr init: %d", error);
                goto out_remove_uuid;
        }

        /*
         * Initialize the precomputed transaction reservations values.
         */
        xfs_trans_init(mp);

        /*
         * Allocate and initialize the per-ag data.
         */
        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
        if (error) {
                xfs_warn(mp, "Failed per-ag init: %d", error);
                goto out_free_dir;
        }

        if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
                xfs_warn(mp, "no log defined");
                error = -EFSCORRUPTED;
                goto out_free_perag;
        }

        /*
         * Log's mount-time initialization. The first part of recovery can place
         * some items on the AIL, to be handled when recovery is finished or
         * cancelled.
         */
        error = xfs_log_mount(mp, mp->m_logdev_targp,
                              XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
                              XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
        if (error) {
                xfs_warn(mp, "log mount failed");
                goto out_fail_wait;
        }

        /* Make sure the summary counts are ok. */
        error = xfs_check_summary_counts(mp);
        if (error)
                goto out_log_dealloc;

        /*
         * Get and sanity-check the root inode.
         * Save the pointer to it in the mount structure.
         */
        error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
                         XFS_ILOCK_EXCL, &rip);
        if (error) {
                xfs_warn(mp,
                        "Failed to read root inode 0x%llx, error %d",
                        sbp->sb_rootino, -error);
                goto out_log_dealloc;
        }

        ASSERT(rip != NULL);

        if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
                error = -EFSCORRUPTED;
                goto out_rele_rip;
        }
        mp->m_rootip = rip;     /* save it */

        xfs_iunlock(rip, XFS_ILOCK_EXCL);

        /*
         * Initialize realtime inode pointers in the mount structure
         */
        error = xfs_rtmount_inodes(mp);
        if (error) {
                /*
                 * Free up the root inode.
                 */
                xfs_warn(mp, "failed to read RT inodes");
                goto out_rele_rip;
        }

        /*
         * If this is a read-only mount defer the superblock updates until
         * the next remount into writeable mode. Otherwise we would never
         * perform the update e.g. for the root filesystem.
         */
        if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
                error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        goto out_rtunmount;
                }
        }

        /*
         * Initialise the XFS quota management subsystem for this mount
         */
        if (XFS_IS_QUOTA_RUNNING(mp)) {
                error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
                if (error)
                        goto out_rtunmount;
        } else {
                ASSERT(!XFS_IS_QUOTA_ON(mp));

                /*
                 * If a file system had quotas running earlier, but decided to
                 * mount without -o uquota/pquota/gquota options, revoke the
                 * quotachecked license.
                 */
                if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
                        xfs_notice(mp, "resetting quota flags");
                        error = xfs_mount_reset_sbqflags(mp);
                        if (error)
                                goto out_rtunmount;
                }
        }

        /*
         * Finish recovering the file system. This part needed to be delayed
         * until after the root and real-time bitmap inodes were consistently
         * read in. Temporarily create per-AG space reservations for metadata
         * btree shape changes because space freeing transactions (for inode
         * inactivation) require the per-AG reservation in lieu of reserving
         * blocks.
         */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error == -ENOSPC)
                xfs_warn(mp,
        "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
        error = xfs_log_mount_finish(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        if (error) {
                xfs_warn(mp, "log mount finish failed");
                goto out_rtunmount;
        }

        /*
         * Now the log is fully replayed, we can transition to full read-only
         * mode for read-only mounts. This will sync all the metadata and clean
         * the log so that the recovery we just performed does not have to be
         * replayed again on the next mount.
         *
         * We use the same quiesce mechanism as the rw->ro remount, as they are
         * semantically identical operations.
         */
        if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
                        XFS_MOUNT_RDONLY) {
                xfs_quiesce_attr(mp);
        }

        /*
         * Complete the quota initialisation, post-log-replay component.
         */
        if (quotamount) {
                ASSERT(mp->m_qflags == 0);
                mp->m_qflags = quotaflags;

                xfs_qm_mount_quotas(mp);
        }

        /*
         * Now we are mounted, reserve a small amount of unused space for
         * privileged transactions. This is needed so that transaction
         * space required for critical operations can dip into this pool
         * when at ENOSPC. This is needed for operations like create with
         * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
         * are not allowed to use this reserved space.
         *
         * This may drive us straight to ENOSPC on mount, but that implies
         * we were already there on the last unmount. Warn if this occurs.
         */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                resblks = xfs_default_resblks(mp);
                error = xfs_reserve_blocks(mp, &resblks, NULL);
                if (error)
                        xfs_warn(mp,
        "Unable to allocate reserve blocks. Continuing without reserve pool.");

                /* Recover any CoW blocks that never got remapped. */
                error = xfs_reflink_recover_cow(mp);
                if (error) {
                        xfs_err(mp,
        "Error %d recovering leftover CoW allocations.", error);
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        goto out_quota;
                }

                /* Reserve AG blocks for future btree expansion. */
                error = xfs_fs_reserve_ag_blocks(mp);
                if (error && error != -ENOSPC)
                        goto out_agresv;
        }

        return 0;

out_agresv:
        xfs_fs_unreserve_ag_blocks(mp);
out_quota:
        xfs_qm_unmount_quotas(mp);
out_rtunmount:
        xfs_rtunmount_inodes(mp);
out_rele_rip:
        xfs_irele(rip);
        /* Clean out dquots that might be in memory after quotacheck. */
        xfs_qm_unmount(mp);
        /*
         * Flush all inode reclamation work and flush the log.
         * We have to do this /after/ rtunmount and qm_unmount because those
         * two will have scheduled delayed reclaim for the rt/quota inodes.
         *
         * This is slightly different from the unmountfs call sequence
         * because we could be tearing down a partially set up mount. In
         * particular, if log_mount_finish fails we bail out without calling
         * qm_unmount_quotas and therefore rely on qm_unmount to release the
         * quota inodes.
         */
        xfs_unmount_flush_inodes(mp);
out_log_dealloc:
        xfs_log_mount_cancel(mp);
out_fail_wait:
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
                xfs_wait_buftarg(mp->m_logdev_targp);
        xfs_wait_buftarg(mp->m_ddev_targp);
out_free_perag:
        xfs_free_perag(mp);
out_free_dir:
        xfs_da_unmount(mp);
out_remove_uuid:
        xfs_uuid_unmount(mp);
out_remove_errortag:
        xfs_errortag_del(mp);
out_remove_error_sysfs:
        xfs_error_sysfs_del(mp);
out_del_stats:
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
out_remove_sysfs:
        xfs_sysfs_del(&mp->m_kobj);
out:
        return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
        struct xfs_mount        *mp)
{
        uint64_t                resblks;
        int                     error;

        xfs_stop_block_reaping(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        xfs_qm_unmount_quotas(mp);
        xfs_rtunmount_inodes(mp);
        xfs_irele(mp->m_rootip);

        xfs_unmount_flush_inodes(mp);

        xfs_qm_unmount(mp);

        /*
         * Unreserve any blocks we have so that when we unmount we don't account
         * the reserved free space as used. This is really only necessary for
         * lazy superblock counting because it trusts the incore superblock
         * counters to be absolutely correct on clean unmount.
         *
         * We don't bother correcting this elsewhere for lazy superblock
         * counting because on mount of an unclean filesystem we reconstruct the
         * correct counter value and this is irrelevant.
         *
         * For non-lazy counter filesystems, this doesn't matter at all because
         * we only ever apply deltas to the superblock and hence the incore
         * value does not matter.
         */
        resblks = 0;
        error = xfs_reserve_blocks(mp, &resblks, NULL);
        if (error)
                xfs_warn(mp, "Unable to free reserved block pool. "
                                "Freespace may not be correct on next mount.");

        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "Unable to update superblock counters. "
                                "Freespace may not be correct on next mount.");

        xfs_log_unmount(mp);
        xfs_da_unmount(mp);
        xfs_uuid_unmount(mp);

#if defined(DEBUG)
        xfs_errortag_clearall(mp);
#endif
        xfs_free_perag(mp);

        xfs_errortag_del(mp);
        xfs_error_sysfs_del(mp);
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
        xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed. The caller specifies the
 * minimum freeze level for which modifications should not be allowed. This
 * allows certain operations to proceed while the freeze sequence is in
 * progress, if necessary.
 */
bool
xfs_fs_writable(
        struct xfs_mount        *mp,
        int                     level)
{
        ASSERT(level > SB_UNFROZEN);
        if ((mp->m_super->s_writers.frozen >= level) ||
            XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
                return false;

        return true;
}

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so we use the
 * transaction allocator that does not block when the transaction subsystem is
 * in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t	*mp)
{
	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * We don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	return xfs_sync_sb(mp, true);
}
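
/*
 * Sketch of a call site: the unmount path above logs the counters after
 * draining the reserve pool, and a freeze/quiesce path would typically do
 * the same, so that a clean log implies trustworthy on-disk counters:
 *
 *	error = xfs_log_sbcount(mp);
 *	if (error)
 *		(warn; the counters will be recomputed on the next mount)
 */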

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (one page at a time). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
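
/*
 * Sketch of the batching behaviour (illustrative only): far from ENOSPC,
 * each CPU may accumulate up to XFS_FDBLOCKS_BATCH blocks of slop in its
 * local counter before folding into the global count, so most updates are
 * lock-free:
 *
 *	percpu_counter_add_batch(&mp->m_fdblocks, delta, XFS_FDBLOCKS_BATCH);
 *
 * Near ENOSPC the same call is made with a batch of 1, which pushes every
 * delta straight into the global counter so the comparison against
 * m_alloc_set_aside below cannot miss an overshoot hidden in per-CPU slop.
 */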
int
xfs_mod_fdblocks(
	struct xfs_mount	*mp,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	s32			batch;

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first. Most of the time the pool is full.
		 */
		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(&mp->m_fdblocks, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			mp->m_resblks_avail += delta;
		} else {
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(&mp->m_fdblocks, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are really close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * Take the sb lock and undo the percpu update that took us to
	 * ENOSPC before dipping into the reserve pool.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(&mp->m_fdblocks, -delta);
	if (!rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}
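
/*
 * Minimal usage sketch (hypothetical caller): a reservation path subtracts
 * blocks up front and returns them if the operation later fails, passing
 * rsvd = true only when it is entitled to dip into the reserve pool:
 *
 *	error = xfs_mod_fdblocks(mp, -((int64_t)nblks), rsvd);
 *	if (error)
 *		return error;	(-ENOSPC; nothing was subtracted)
 *	...
 *	xfs_mod_fdblocks(mp, (int64_t)nblks, rsvd);	(undo on failure)
 */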

int
xfs_mod_frextents(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	int64_t			lcounter;
	int			ret = 0;

	spin_lock(&mp->m_sb_lock);
	lcounter = mp->m_sb.sb_frextents + delta;
	if (lcounter < 0)
		ret = -ENOSPC;
	else
		mp->m_sb.sb_frextents = lcounter;
	spin_unlock(&mp->m_sb_lock);
	return ret;
}
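
/*
 * Unlike m_fdblocks, the free realtime extent count is a plain superblock
 * field updated under m_sb_lock, presumably because rt extent updates are
 * rare enough that a percpu counter isn't worth it. A hypothetical caller
 * allocating rtextents extents would do:
 *
 *	error = xfs_mod_frextents(mp, -((int64_t)rtextents));
 *	if (error == -ENOSPC)
 *		(fall back or fail; the counter was left unchanged)
 */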

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}
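
/*
 * Usage sketch (hypothetical message string): operations that must be able
 * to write every device, such as growing the filesystem, check all targets
 * up front:
 *
 *	error = xfs_dev_is_read_only(mp, "growfs operation");
 *	if (error)
 *		return error;
 */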

/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
	struct xfs_mount	*mp)
{
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return;

	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}
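
/*
 * Sketch: marking XFS_SICK_FS_COUNTERS means the next mount will distrust
 * the on-disk summary counters and recompute them. A hypothetical repair
 * path that may have left them stale would simply call:
 *
 *	xfs_force_summary_recalc(mp);
 */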

/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching). Each change to delayed allocation
 * reservations can easily exceed the default percpu counter batch size, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH	(4096)
void
xfs_mod_delalloc(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
			XFS_DELALLOC_BATCH);
}
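
/*
 * Usage sketch (hypothetical call sites): the buffered write path adds the
 * reserved block count when a delalloc extent is created, and the matching
 * conversion or punch path subtracts it again:
 *
 *	xfs_mod_delalloc(mp, (int64_t)allocated_blocks);
 *	...
 *	xfs_mod_delalloc(mp, -((int64_t)allocated_blocks));
 */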