/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	struct lock_class_key	key;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

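/*
 * Return the flush queue owned by the hardware queue that REQ_OP_FLUSH
 * requests from software queue @ctx are mapped to.
 */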
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

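/* Take an extra reference on the queue's embedded kobject. */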
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

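/*
 * Two bio_vecs may share one physical segment only if they are physically
 * contiguous, Xen does not forbid the merge, and the combined range does
 * not straddle the queue's segment boundary mask.
 */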
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

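/*
 * Gap check that assumes the queue has a virt boundary; callers that are
 * not sure should use bvec_gap_to_prev() below, which short-circuits when
 * no virt boundary is set.
 */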
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

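/*
 * Initialize @rq from its first bio: segment count, data length, I/O
 * priority and (if set) the target disk are all taken over from @bio.
 */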
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

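/*
 * Would appending @next to @req create a virt boundary gap between the last
 * integrity vector of @req and the first integrity vector of @next?
 */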
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

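/*
 * Same check for a front merge: would inserting @bio in front of @req
 * create a gap between their integrity vectors?
 */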
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

void blk_integrity_add(struct gendisk *);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline void blk_integrity_add(struct gendisk *disk)
{
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

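/*
 * Tear down the elevator of @q: free any requests still owned by the
 * scheduler, then exit the elevator itself.  q->sysfs_lock must be held.
 */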
static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

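/*
 * Mark @req as no longer mergeable and forget it as the queue's cached
 * last-merge candidate.
 */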
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * The max bio size which is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer:
 * if the device driver then needs to split a discard bio into smaller ones,
 * their bi_size can very likely be aligned to the discard_granularity of
 * the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);

int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
void blk_free_devt(dev_t devt);
void blk_invalidate_devt(dev_t devt);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
void delete_partition(struct hd_struct *part);
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int disk_expand_part_tbl(struct gendisk *disk, int target);
int hd_ref_init(struct hd_struct *part);

/* no need to get/put refcount of part0 */
static inline int hd_struct_try_get(struct hd_struct *part)
{
	if (part->partno)
		return percpu_ref_tryget_live(&part->ref);
	return 1;
}

static inline void hd_struct_put(struct hd_struct *part)
{
	if (part->partno)
		percpu_ref_put(&part->ref);
}

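/*
 * Release everything a partition owns: its per-cpu stats, its partition
 * info and its percpu refcount.
 */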
static inline void hd_free_part(struct hd_struct *part)
{
	free_percpu(part->dkstats);
	kfree(part->info);
	percpu_ref_exit(&part->ref);
}

/*
 * Any access of part->nr_sects that is not protected by the partition's
 * bd_mutex or the gendisk's bdev bd_mutex should be done using this
 * accessor function.
 *
 * Code written along the lines of i_size_read() and i_size_write().
 * The CONFIG_PREEMPTION case optimizes the case of a UP kernel with
 * preemption enabled.
 */
static inline sector_t part_nr_sects_read(struct hd_struct *part)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	sector_t nr_sects;
	unsigned seq;
	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr_sects = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
	return nr_sects;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	sector_t nr_sects;

	preempt_disable();
	nr_sects = part->nr_sects;
	preempt_enable();
	return nr_sects;
#else
	return part->nr_sects;
#endif
}

/*
 * Should be called with the mutex lock (typically bd_mutex) of the partition
 * held, to provide mutual exclusion among writers; otherwise the seqcount
 * might be left in a wrong state, leaving the readers spinning infinitely.
 */
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	preempt_disable();
	write_seqcount_begin(&part->nr_sects_seq);
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
	preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	preempt_disable();
	part->nr_sects = size;
	preempt_enable();
#else
	part->nr_sects = size;
#endif
}

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

#endif /* BLK_INTERNAL_H */