// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1 << 16) /* inode buffer */
#define _XBF_DQUOTS	 (1 << 17) /* dquot buffer */
#define _XBF_LOGRECOVERY (1 << 18) /* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20) /* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22) /* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 30) /* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 31) /* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
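
/*
 * A sketch of how the XFS_BUF_FLAGS table above is typically consumed:
 * the { value, name } pairs match the format expected by the tracing
 * helper __print_flags(), so a trace event can render the flags word
 * symbolically (assuming the usual ftrace TP_printk() plumbing):
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * which prints e.g. (XBF_READ | XBF_ASYNC) as "READ|ASYNC".
 */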

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
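
/*
 * A sketch of the relationship between the sector size and mask fields
 * above; the real values are established when the buftarg is set up
 * (see xfs_setsize_buftarg()). With a power-of-two sector size:
 *
 *	btp->bt_meta_sectorsize = sectorsize;
 *	btp->bt_meta_sectormask = sectorsize - 1;
 *
 * an offset is then sector aligned iff
 * (offset & btp->bt_meta_sectormask) == 0.
 */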

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
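
/*
 * A minimal sketch of a read verifier wired up through xfs_buf_ops.
 * The structure, magic constant and ops name here are hypothetical
 * placeholders; real verifiers also check CRCs and structural
 * invariants before flagging corruption:
 *
 * static void
 * xfs_foo_verify_read(
 *	struct xfs_buf		*bp)
 * {
 *	struct xfs_foo_hdr	*hdr = bp->b_addr;
 *
 *	if (!xfs_verify_magic(bp, hdr->foo_magic))
 *		xfs_buf_mark_corrupt(bp);
 * }
 *
 * const struct xfs_buf_ops xfs_foo_buf_ops = {
 *	.name = "xfs_foo",
 *	.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *		   cpu_to_be32(XFS_FOO_MAGIC) },
 *	.verify_read = xfs_foo_verify_read,
 * };
 */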

typedef struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure, then incremented on each subsequent failure; once it
	 * exceeds the configured maximum without an intervening success,
	 * the write is considered permanently failed and the iodone handler
	 * will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors, e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs; in that case we want to
	 * re-initialise the error retry behaviour appropriately.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			   struct xfs_buf_map *map, int nmaps,
			   const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
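
/*
 * A minimal usage sketch for the read helper above (error handling
 * trimmed; the block number and xfs_foo_buf_ops are hypothetical
 * placeholders):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			     &xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *	... inspect or modify bp->b_addr ...
 *	xfs_buf_relse(bp);	unlock and drop the reference
 */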

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, int flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
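
/*
 * Sketch of the trylock pattern built from the helpers above; callers
 * that cannot sleep use this to back off instead of blocking:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	... work on the locked buffer ...
 *	xfs_buf_relse(bp);	unlocks bp and drops our reference
 */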

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
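
/*
 * Sketch of the delayed write queue pattern (error handling trimmed):
 * callers batch buffers on a private list and submit them in one go:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	... queue further buffers ...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */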

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn now really just
 * serves as the buffer cache index for cached buffers. As IO does not use
 * b_bn anymore, uncached buffers do not use b_bn at all and hence must modify
 * the IO map directly. Uncached buffers are not allowed to be discontiguous,
 * so this is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the IO
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
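
/*
 * Sketch: because nothing indexes an uncached buffer by b_bn, code that
 * reuses one for IO at a new location updates the map instead (daddr is
 * a hypothetical placeholder):
 *
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	... submit the IO on bp ...
 */
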
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
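
/*
 * Example (sketch): the checksum offset is the byte offset of the CRC
 * field within the on-disk structure, so a superblock write path would
 * recompute the CRC over the whole buffer with:
 *
 *	xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
 *
 * where XFS_SB_CRC_OFF is the offsetof()-derived constant for the
 * sb_crc field of struct xfs_dsb.
 */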

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */