1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4*4882a593Smuzhiyun * All Rights Reserved.
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun #ifndef __XFS_MOUNT_H__
7*4882a593Smuzhiyun #define __XFS_MOUNT_H__
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun struct xlog;
10*4882a593Smuzhiyun struct xfs_inode;
11*4882a593Smuzhiyun struct xfs_mru_cache;
12*4882a593Smuzhiyun struct xfs_ail;
13*4882a593Smuzhiyun struct xfs_quotainfo;
14*4882a593Smuzhiyun struct xfs_da_geometry;
15*4882a593Smuzhiyun
/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,	/* lowest threshold: 1% free space */
	XFS_LOWSP_2_PCNT,	/* 2% free space */
	XFS_LOWSP_3_PCNT,	/* 3% free space */
	XFS_LOWSP_4_PCNT,	/* 4% free space */
	XFS_LOWSP_5_PCNT,	/* 5% free space */
	XFS_LOWSP_MAX,		/* number of levels; sizes m_low_space[] */
};
25*4882a593Smuzhiyun
/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,	/* metadata writeback error configuration */
	XFS_ERR_CLASS_MAX,	/* number of error classes */
};
enum {
	XFS_ERR_DEFAULT,	/* behaviour for errnos with no specific cfg */
	XFS_ERR_EIO,		/* configuration for EIO */
	XFS_ERR_ENOSPC,		/* configuration for ENOSPC */
	XFS_ERR_ENODEV,		/* configuration for ENODEV */
	XFS_ERR_ERRNO_MAX,	/* number of configurable error numbers */
};
43*4882a593Smuzhiyun
/* Special retry count meaning "never give up". */
#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;		/* sysfs object exposing this config */
	int		max_retries;	/* retry limit, -1 = retry forever */
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
57*4882a593Smuzhiyun
/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly variables
 * first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;	/* VFS superblock */
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* data device target */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_cil_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_eofblocks_workqueue;
	struct workqueue_struct	*m_sync_workqueue;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_flags;	/* global mount flags */
	int64_t			m_low_space[XFS_LOWSP_MAX];
						/* low free space thresholds */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	bool			m_always_cow;	/* presumably -o always_cow;
						   TODO confirm against
						   mount option parsing */
	bool			m_fail_unmount;
	bool			m_finobt_nores;	/* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables. Frequently written variables and locks
	 * should be placed below this comment from now on. The first variable
	 * here is marked as cacheline aligned so that it is separated from
	 * the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct delayed_work	m_eofblocks_work; /* background eof blocks
						     trimming */
	struct delayed_work	m_cowblocks_work; /* background cow blocks
						     trimming */
	struct xfs_kobj		m_kobj;		/* sysfs object for this fs */
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */

	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;
221*4882a593Smuzhiyun
/* Convenience accessor for the inode geometry embedded in the mount. */
#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_flags.
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)	/* last unmount was clean (see
						   XFS_LAST_UNMOUNT_WAS_CLEAN) */
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_DISCARD	(1ULL << 5)	/* discard unused blocks */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2	(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID	(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_ALLOCSIZE	(1ULL << 12)	/* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 14)	/* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES	(1ULL << 15)	/* inode32 allocator active */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters */
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						 * allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_LARGEIO	(1ULL << 22)	/* report large preferred
						 * I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
#define XFS_MOUNT_DAX_ALWAYS	(1ULL << 26)	/* presumably -o dax=always;
						   confirm in option parsing */
#define XFS_MOUNT_DAX_NEVER	(1ULL << 27)	/* presumably -o dax=never */

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
/* Shut the filesystem down, recording the callsite for diagnostics. */
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

/* Reasons passed to xfs_do_force_shutdown(). */
#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount * mp,xfs_daddr_t d)284*4882a593Smuzhiyun xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
287*4882a593Smuzhiyun do_div(ld, mp->m_sb.sb_agblocks);
288*4882a593Smuzhiyun return (xfs_agnumber_t) ld;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount * mp,xfs_daddr_t d)292*4882a593Smuzhiyun xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
295*4882a593Smuzhiyun return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun
/* per-AG block reservation data structures */
struct xfs_ag_resv {
	/* number of blocks originally reserved here */
	xfs_extlen_t			ar_orig_reserved;
	/* number of blocks reserved here */
	xfs_extlen_t			ar_reserved;
	/* number of blocks originally asked for */
	xfs_extlen_t			ar_asked;
};
307*4882a593Smuzhiyun
/*
 * Per-ag incore structure, copies of information in agf and agi, to improve the
 * performance of allocation group selection.
 */
typedef struct xfs_perag {
	struct xfs_mount *pag_mount;	/* owner filesystem */
	xfs_agnumber_t	pag_agno;	/* AG this structure belongs to */
	atomic_t	pag_ref;	/* perag reference count */
	char		pagf_init;	/* this agf's entry is initialized */
	char		pagi_init;	/* this agi's entry is initialized */
	char		pagf_metadata;	/* the agf is preferred to be metadata */
	char		pagi_inodeok;	/* The agi is ok for inodes */
	uint8_t		pagf_levels[XFS_BTNUM_AGF];
					/* # of levels in bno & cnt btree */
	bool		pagf_agflreset;	/* agfl requires reset before use */
	uint32_t	pagf_flcount;	/* count of blocks in freelist */
	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
	xfs_extlen_t	pagf_longest;	/* longest free space */
	uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
	xfs_agino_t	pagi_freecount;	/* number of free inodes */
	xfs_agino_t	pagi_count;	/* number of allocated inodes */

	/*
	 * Inode allocation search lookup optimisation.
	 * If the pagino matches, the search for new inodes
	 * doesn't need to search the near ones again straight away
	 */
	xfs_agino_t	pagl_pagino;
	xfs_agino_t	pagl_leftrec;
	xfs_agino_t	pagl_rightrec;

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold pag_state_lock before accessing this field.
	 */
	uint16_t	pag_checked;
	uint16_t	pag_sick;
	spinlock_t	pag_state_lock;

	spinlock_t	pagb_lock;	/* lock for pagb_tree */
	struct rb_root	pagb_tree;	/* ordered tree of busy extents */
	unsigned int	pagb_gen;	/* generation count for pagb_tree */
	wait_queue_head_t pagb_wait;	/* woken when pagb_gen changes */

	atomic_t	pagf_fstrms;	/* # of filestreams active in this AG */

	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
	int		pag_ici_reclaimable;	/* reclaimable inodes */
	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */

	/* buffer cache index */
	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
	struct rhashtable pag_buf_hash;

	/* for rcu-safe freeing */
	struct rcu_head	rcu_head;
	int		pagb_count;	/* pagb slots in use */

	/* Blocks reserved for all kinds of metadata. */
	struct xfs_ag_resv	pag_meta_resv;
	/* Blocks reserved for the reverse mapping btree. */
	struct xfs_ag_resv	pag_rmapbt_resv;

	/*
	 * NOTE(review): originally commented as "reference count", but the
	 * name suggests this is the refcount btree level — confirm against
	 * the AGF on-disk format.
	 */
	uint8_t			pagf_refcount_level;

	/*
	 * Unlinked inode information.  This incore information reflects
	 * data stored in the AGI, so callers must hold the AGI buffer lock
	 * or have some other means to control concurrency.
	 */
	struct rhashtable	pagi_unlinked_hash;
} xfs_perag_t;
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun static inline struct xfs_ag_resv *
xfs_perag_resv(struct xfs_perag * pag,enum xfs_ag_resv_type type)384*4882a593Smuzhiyun xfs_perag_resv(
385*4882a593Smuzhiyun struct xfs_perag *pag,
386*4882a593Smuzhiyun enum xfs_ag_resv_type type)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun switch (type) {
389*4882a593Smuzhiyun case XFS_AG_RESV_METADATA:
390*4882a593Smuzhiyun return &pag->pag_meta_resv;
391*4882a593Smuzhiyun case XFS_AG_RESV_RMAPBT:
392*4882a593Smuzhiyun return &pag->pag_rmapbt_resv;
393*4882a593Smuzhiyun default:
394*4882a593Smuzhiyun return NULL;
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun
/* Prototypes for routines defined in the corresponding .c files. */
int	xfs_buf_hash_init(xfs_perag_t *pag);
void	xfs_buf_hash_destroy(xfs_perag_t *pag);

extern void	xfs_uuid_table_free(void);
extern int	xfs_log_sbcount(xfs_mount_t *);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
				     xfs_agnumber_t *maxagi);
extern void	xfs_unmountfs(xfs_mount_t *);
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun /* Accessor added for 5.10.y backport */
410*4882a593Smuzhiyun static inline uint64_t
xfs_fdblocks_unavailable(struct xfs_mount * mp)411*4882a593Smuzhiyun xfs_fdblocks_unavailable(
412*4882a593Smuzhiyun struct xfs_mount *mp)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun return mp->m_alloc_set_aside;
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun
/* Free-space counter adjustment helpers. */
extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
				 bool reserved);
extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
		xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void	xfs_force_summary_recalc(struct xfs_mount *mp);
void	xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);

#endif	/* __XFS_MOUNT_H__ */
439