xref: /OK3568_Linux_fs/kernel/include/linux/buffer_head.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
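
/*
 * Illustrative only (not part of this header): a filesystem that needs extra
 * per-buffer state claims bits at or above BH_PrivateStart, much as jbd2
 * does.  The "myfs" names below are hypothetical; BUFFER_FNS() used here is
 * defined further down in this file.
 */
#if 0
enum myfs_state_bits {
	BH_Myfs_Journaled = BH_PrivateStart,	/* buffer is under journal control */
	BH_Myfs_Revoked,			/* block has been revoked */
};

BUFFER_FNS(Myfs_Journaled, myfs_journaled)	/* set/clear/test helpers */
BUFFER_FNS(Myfs_Revoked, myfs_revoked)
#endif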

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which would cause
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
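
/*
 * For reference, an invocation such as BUFFER_FNS(Dirty, dirty) further down
 * expands (modulo whitespace) to roughly:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */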

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}									\

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
}

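#if 0
/*
 * Illustrative only: the release/acquire pairing described above.  The I/O
 * completion side publishes the buffer contents before setting BH_Uptodate;
 * a reader that then observes buffer_uptodate() is guaranteed to see the
 * data.  This mirrors what end_buffer_read_sync() and callers of
 * wait_on_buffer() (both declared below) already do.
 */
static void example_end_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);	/* barrier before set_bit() */
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

static int example_reader(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	return buffer_uptodate(bh);	/* smp_load_acquire() pairs with the above */
}
#endif
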
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)

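#if 0
/*
 * Illustrative only: the common idiom for visiting every buffer attached to
 * a page.  b_this_page links the buffers into a circular list whose head is
 * stored in page->private.
 */
static void example_for_each_bh(struct page *page)
{
	struct buffer_head *head, *bh;

	if (!page_has_buffers(page))
		return;

	head = page_buffers(page);
	bh = head;
	do {
		/* inspect bh->b_state, bh->b_blocknr, ... */
		bh = bh->b_this_page;
	} while (bh != head);
}
#endif
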
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		  gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
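
#if 0
/*
 * Illustrative only: a minimal ->page_mkwrite() implementation built on
 * block_page_mkwrite(), converting the errno with the helper above.  The
 * myfs_get_block() callback is hypothetical.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	int err;

	sb_start_pagefault(sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);

	return block_page_mkwrite_return(err);
}
#endif
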
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

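#if 0
/*
 * Illustrative only: the typical metadata read path in a filesystem.
 * sb_bread() returns NULL if the block could not be read; the caller owns a
 * reference on the returned buffer_head and must drop it with brelse().
 * The myfs_* name is hypothetical.
 */
static int myfs_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;

	/* parse sb->s_blocksize bytes at bh->b_data here */

	brelse(bh);
	return 0;
}
#endif
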
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

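#if 0
/*
 * Illustrative only: the shape of a get_block_t callback.  The filesystem
 * translates a file-relative block to a disk block and describes it through
 * map_bh(); freshly allocated blocks are also flagged with set_buffer_new().
 * myfs_lookup_or_alloc() is hypothetical.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	bool new = false;
	sector_t phys = myfs_lookup_or_alloc(inode, iblock, create, &new);

	if (!phys)
		return create ? -ENOSPC : 0;	/* a hole: leave bh unmapped */

	map_bh(bh_result, inode->i_sb, phys);
	if (new)
		set_buffer_new(bh_result);
	return 0;
}
#endif
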
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

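#if 0
/*
 * Illustrative only: the classic synchronous read of a single buffer, much
 * like what __bread() and bh_submit_read() do internally.  REQ_OP_READ comes
 * from the block layer; end_buffer_read_sync() is declared above.
 */
static struct buffer_head *example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, 0, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	return NULL;
}
#endif
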
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be migrated.
 *  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */