/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/pm.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_keyslot_manager;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS 5

static inline int blk_validate_block_size(unsigned int bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}
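
/*
 * Example (illustrative sketch, not part of this header): a driver that
 * accepts a configurable block size would typically validate it with the
 * helper above before applying it to the queue. The function name below is
 * hypothetical; blk_queue_logical_block_size() is declared later in this
 * file.
 *
 *	static int example_set_blocksize(struct request_queue *q,
 *					 unsigned int bsize)
 *	{
 *		int ret = blk_validate_block_size(bsize);
 *
 *		if (ret)
 *			return ret;
 *		blk_queue_logical_block_size(q, bsize);
 *		return 0;
 *	}
 */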

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
	MQ_RQ_IDLE = 0,
	MQ_RQ_IN_FLIGHT = 1,
	MQ_RQ_COMPLETE = 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags; /* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len; /* total data len */
	sector_t __sector; /* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash; /* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node; /* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count; /* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq *icq;
			void *priv[2];
		} elv;

		struct {
			unsigned int seq;
			struct list_head list;
			rq_end_io_fn *saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_ksm_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	refcount_t ref;

	unsigned int timeout;
	unsigned long deadline;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	ANDROID_KABI_RESERVE(1);
};
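
/*
 * Example (illustrative sketch): end_io/end_io_data form the asynchronous
 * completion hook of a request. A driver-private callback matching
 * rq_end_io_fn might signal a waiter like this (names are hypothetical):
 *
 *	static void example_end_io(struct request *rq, blk_status_t error)
 *	{
 *		struct completion *waiting = rq->end_io_data;
 *
 *		rq->end_io_data = NULL;
 *		complete(waiting);
 *	}
 *
 * The callback is installed by passing it to blk_execute_rq_nowait(),
 * declared later in this file.
 */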

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}
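
/*
 * Example (illustrative sketch): drivers commonly branch on
 * blk_rq_is_passthrough() in their ->queue_rq() handler, since passthrough
 * (SCSI or driver-private) requests carry a prebuilt command instead of a
 * filesystem bio payload. The handler and helpers below are hypothetical:
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (blk_rq_is_passthrough(rq))
 *			return example_issue_passthrough(rq);
 *		return example_issue_fs_rq(rq);
 *	}
 */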

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

struct bio_vec;

enum blk_eh_timer_return {
	BLK_EH_DONE, /* driver has completed the command */
	BLK_EH_RESET_TIMER, /* reset timer and try again */
};

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0, /* Regular block device */
	BLK_ZONED_HA, /* Host-aware zoned block device */
	BLK_ZONED_HM, /* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int logical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_zone_append_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char raid_partial_stripes_expensive;
	enum blk_zoned_model zoned;

	ANDROID_KABI_RESERVE(1);
};
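
/*
 * Example (illustrative sketch): drivers normally do not fill struct
 * queue_limits by hand; they use the blk_queue_* setters declared near the
 * end of this header, which keep derived fields consistent. The values
 * below are made up for illustration only:
 *
 *	static void example_set_limits(struct request_queue *q)
 *	{
 *		blk_queue_logical_block_size(q, 4096);
 *		blk_queue_max_hw_sectors(q, 2048);
 *		blk_queue_max_segments(q, 128);
 *		blk_queue_max_segment_size(q, 65536);
 *	}
 */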

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
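
/*
 * Example (illustrative sketch, CONFIG_BLK_DEV_ZONED only): counting the
 * sequential-write-required zones of a device with blkdev_report_zones().
 * The callback runs once per reported zone; returning a negative errno
 * aborts the iteration. Names are hypothetical.
 *
 *	static int example_count_seq_cb(struct blk_zone *zone,
 *					unsigned int idx, void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 * and then:
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				  example_count_seq_cb, &nr_seq);
 */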

struct request_queue {
	struct request *last_merge;
	struct elevator_queue *elevator;

	struct percpu_ref q_usage_counter;

	struct blk_queue_stats *stats;
	struct rq_qos *rq_qos;

	const struct blk_mq_ops *mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;

	unsigned int queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx **queue_hw_ctx;
	unsigned int nr_hw_queues;

	struct backing_dev_info *backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t pm_only;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	spinlock_t queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device *dev;
	enum rpm_status rpm_status;
	unsigned int nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests; /* Max # of requests */

	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	/* Inline crypto capabilities */
	struct blk_keyslot_manager *ksm;
#endif

	unsigned int rq_timeout;
	int poll_nsec;

	struct blk_stat_callback *poll_cb;
	struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list timeout;
	struct work_struct timeout_work;

	atomic_t nr_active_requests_shared_sbitmap;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	unsigned int required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int nr_zones;
	unsigned long *conv_zones_bitmap;
	unsigned long *seq_zones_wlock;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
	struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct delayed_work requeue_work;

	struct mutex sysfs_lock;
	struct mutex sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head unused_hctx_list;
	spinlock_t unused_hctx_lock;

	int mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex mq_freeze_lock;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;
	struct bio_set bio_split;

	struct dentry *debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *sched_debugfs_dir;
	struct dentry *rqos_debugfs_dir;
#endif

	bool mq_sysfs_init_done;

	size_t cmd_size;

#define BLK_MAX_WRITE_HINTS 5
	u64 write_hints[BLK_MAX_WRITE_HINTS];

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
	ANDROID_OEM_DATA(1);
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */

#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
			       (1 << QUEUE_FLAG_SAME_COMP) | \
			       (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q) false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
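
/*
 * Example (illustrative sketch): a driver advertising a non-rotational
 * device with discard support sets the relevant flags at queue setup time,
 * and other code tests them through the wrappers above:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 *
 *	if (blk_queue_discard(q))
 *		... the device accepts REQ_OP_DISCARD requests ...
 */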

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
			   (dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;
	if (!q->conv_zones_bitmap)
		return true;
	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}

static inline void blk_queue_max_open_zones(struct request_queue *q,
					    unsigned int max_open_zones)
{
	q->max_open_zones = max_open_zones;
}

static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return q->max_open_zones;
}

static inline void blk_queue_max_active_zones(struct request_queue *q,
					      unsigned int max_active_zones)
{
	q->max_active_zones = max_active_zones;
}

static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return q->max_active_zones;
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	return 0;
}
static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return 0;
}
static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
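
/*
 * Example (illustrative sketch): mapping a request's start sector to its
 * zone and checking whether that zone must be written sequentially. On a
 * non-zoned queue the helpers degrade to 0/false, so callers need no extra
 * CONFIG_BLK_DEV_ZONED checks of their own:
 *
 *	unsigned int zno = blk_queue_zone_no(q, blk_rq_pos(rq));
 *
 *	if (blk_queue_zone_is_seq(q, blk_rq_pos(rq)))
 *		... writes within zone zno must be issued in order ...
 */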

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH : bounce all highmem pages
 * BLK_BOUNCE_ANY : don't bounce anything
 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH -1ULL
#endif
#define BLK_BOUNCE_ANY (-1ULL)
#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter) \
	(_iter.bio->bi_next == NULL && \
	 bio_iter_last(bvec, _iter.iter))
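
/*
 * Example (illustrative sketch): walking the data of a request one segment
 * at a time, e.g. to copy it into a driver buffer. "dst" is a hypothetical
 * destination pointer:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	void *p;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		p = kmap_atomic(bvec.bv_page);
 *		memcpy(dst, p + bvec.bv_offset, bvec.bv_len);
 *		kunmap_atomic(p);
 *		dst += bvec.bv_len;
 *	}
 */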

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
					      struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
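
/*
 * Example (illustrative sketch): issuing a driver-private request
 * synchronously. blk_execute_rq() inserts the request and waits for its
 * completion; blk_execute_rq_nowait() returns immediately and invokes the
 * supplied rq_end_io_fn instead. Command setup and error handling are
 * elided:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver-private command data ...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */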

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue; /* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}
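
/*
 * Example (worked, illustrative): for a 32 KiB request starting at byte
 * offset 1 MiB, the accessors above relate as follows (512-byte sectors):
 *
 *	blk_rq_pos(rq)		== 2048		(1 MiB / 512)
 *	blk_rq_bytes(rq)	== 32768
 *	blk_rq_sectors(rq)	== 64		(32768 >> SECTOR_SHIFT)
 *	blk_rq_cur_bytes(rq)	== bytes left in the current bio segment
 */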

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
1034*4882a593Smuzhiyun const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
1035*4882a593Smuzhiyun
blk_rq_zone_no(struct request * rq)1036*4882a593Smuzhiyun static inline unsigned int blk_rq_zone_no(struct request *rq)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun
blk_rq_zone_is_seq(struct request * rq)1041*4882a593Smuzhiyun static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_ZONED */
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun /*
1048*4882a593Smuzhiyun * Some commands like WRITE SAME have a payload or data transfer size which
1049*4882a593Smuzhiyun * is different from the size of the request. Any driver that supports such
1050*4882a593Smuzhiyun * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1051*4882a593Smuzhiyun * calculate the data transfer size.
1052*4882a593Smuzhiyun */
blk_rq_payload_bytes(struct request * rq)1053*4882a593Smuzhiyun static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1056*4882a593Smuzhiyun return rq->special_vec.bv_len;
1057*4882a593Smuzhiyun return blk_rq_bytes(rq);
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun /*
1061*4882a593Smuzhiyun * Return the first full biovec in the request. The caller needs to check that
1062*4882a593Smuzhiyun * there are any bvecs before calling this helper.
1063*4882a593Smuzhiyun */
req_bvec(struct request * rq)1064*4882a593Smuzhiyun static inline struct bio_vec req_bvec(struct request *rq)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1067*4882a593Smuzhiyun return rq->special_vec;
1068*4882a593Smuzhiyun return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1069*4882a593Smuzhiyun }
1070*4882a593Smuzhiyun
blk_queue_get_max_sectors(struct request_queue * q,int op)1071*4882a593Smuzhiyun static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1072*4882a593Smuzhiyun int op)
1073*4882a593Smuzhiyun {
1074*4882a593Smuzhiyun if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1075*4882a593Smuzhiyun return min(q->limits.max_discard_sectors,
1076*4882a593Smuzhiyun UINT_MAX >> SECTOR_SHIFT);
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun if (unlikely(op == REQ_OP_WRITE_SAME))
1079*4882a593Smuzhiyun return q->limits.max_write_same_sectors;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun if (unlikely(op == REQ_OP_WRITE_ZEROES))
1082*4882a593Smuzhiyun return q->limits.max_write_zeroes_sectors;
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun return q->limits.max_sectors;
1085*4882a593Smuzhiyun }
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun /*
1088*4882a593Smuzhiyun * Return maximum size of a request at given offset. Only valid for
1089*4882a593Smuzhiyun * file system requests.
1090*4882a593Smuzhiyun */
blk_max_size_offset(struct request_queue * q,sector_t offset,unsigned int chunk_sectors)1091*4882a593Smuzhiyun static inline unsigned int blk_max_size_offset(struct request_queue *q,
1092*4882a593Smuzhiyun sector_t offset,
1093*4882a593Smuzhiyun unsigned int chunk_sectors)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun if (!chunk_sectors) {
1096*4882a593Smuzhiyun if (q->limits.chunk_sectors)
1097*4882a593Smuzhiyun chunk_sectors = q->limits.chunk_sectors;
1098*4882a593Smuzhiyun else
1099*4882a593Smuzhiyun return q->limits.max_sectors;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun if (likely(is_power_of_2(chunk_sectors)))
1103*4882a593Smuzhiyun chunk_sectors -= offset & (chunk_sectors - 1);
1104*4882a593Smuzhiyun else
1105*4882a593Smuzhiyun chunk_sectors -= sector_div(offset, chunk_sectors);
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun return min(q->limits.max_sectors, chunk_sectors);
1108*4882a593Smuzhiyun }
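/*
 * Worked example for the power-of-2 path above: with chunk_sectors == 256
 * and offset == 200, "offset & (chunk_sectors - 1)" is 200, so only
 * 256 - 200 = 56 sectors remain before the next chunk boundary and the
 * result is min(q->limits.max_sectors, 56).
 */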
1109*4882a593Smuzhiyun
blk_rq_get_max_sectors(struct request * rq,sector_t offset)1110*4882a593Smuzhiyun static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1111*4882a593Smuzhiyun sector_t offset)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun struct request_queue *q = rq->q;
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun if (blk_rq_is_passthrough(rq))
1116*4882a593Smuzhiyun return q->limits.max_hw_sectors;
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun if (!q->limits.chunk_sectors ||
1119*4882a593Smuzhiyun req_op(rq) == REQ_OP_DISCARD ||
1120*4882a593Smuzhiyun req_op(rq) == REQ_OP_SECURE_ERASE)
1121*4882a593Smuzhiyun return blk_queue_get_max_sectors(q, req_op(rq));
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun return min(blk_max_size_offset(q, offset, 0),
1124*4882a593Smuzhiyun blk_queue_get_max_sectors(q, req_op(rq)));
1125*4882a593Smuzhiyun }
1126*4882a593Smuzhiyun
blk_rq_count_bios(struct request * rq)1127*4882a593Smuzhiyun static inline unsigned int blk_rq_count_bios(struct request *rq)
1128*4882a593Smuzhiyun {
1129*4882a593Smuzhiyun unsigned int nr_bios = 0;
1130*4882a593Smuzhiyun struct bio *bio;
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun __rq_for_each_bio(bio, rq)
1133*4882a593Smuzhiyun nr_bios++;
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun return nr_bios;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun void blk_steal_bios(struct bio_list *list, struct request *rq);
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun /*
1141*4882a593Smuzhiyun * Request completion related functions.
1142*4882a593Smuzhiyun *
1143*4882a593Smuzhiyun  * blk_update_request() completes the given number of bytes and updates
1144*4882a593Smuzhiyun  * the request without completing it.
1145*4882a593Smuzhiyun */
1146*4882a593Smuzhiyun extern bool blk_update_request(struct request *rq, blk_status_t error,
1147*4882a593Smuzhiyun unsigned int nr_bytes);
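/*
 * Sketch of the usual partial-completion pattern in a request-based
 * driver (blk_mq_end_request() is declared in <linux/blk-mq.h>):
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, bytes_done))
 *		blk_mq_end_request(rq, BLK_STS_OK);	// nothing left to do
 *	else
 *		requeue_or_continue(rq);		// hypothetical helper
 */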
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun extern void blk_abort_request(struct request *);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun /*
1152*4882a593Smuzhiyun * Access functions for manipulating queue properties
1153*4882a593Smuzhiyun */
1154*4882a593Smuzhiyun extern void blk_cleanup_queue(struct request_queue *);
1155*4882a593Smuzhiyun extern void blk_queue_bounce_limit(struct request_queue *, u64);
1156*4882a593Smuzhiyun extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1157*4882a593Smuzhiyun extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1158*4882a593Smuzhiyun extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1159*4882a593Smuzhiyun extern void blk_queue_max_discard_segments(struct request_queue *,
1160*4882a593Smuzhiyun unsigned short);
1161*4882a593Smuzhiyun extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1162*4882a593Smuzhiyun extern void blk_queue_max_discard_sectors(struct request_queue *q,
1163*4882a593Smuzhiyun unsigned int max_discard_sectors);
1164*4882a593Smuzhiyun extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1165*4882a593Smuzhiyun unsigned int max_write_same_sectors);
1166*4882a593Smuzhiyun extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1167*4882a593Smuzhiyun 		unsigned int max_write_zeroes_sectors);
1168*4882a593Smuzhiyun extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
1169*4882a593Smuzhiyun extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
1170*4882a593Smuzhiyun unsigned int max_zone_append_sectors);
1171*4882a593Smuzhiyun extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1172*4882a593Smuzhiyun extern void blk_queue_alignment_offset(struct request_queue *q,
1173*4882a593Smuzhiyun unsigned int alignment);
1174*4882a593Smuzhiyun void blk_queue_update_readahead(struct request_queue *q);
1175*4882a593Smuzhiyun extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1176*4882a593Smuzhiyun extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1177*4882a593Smuzhiyun extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1178*4882a593Smuzhiyun extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1179*4882a593Smuzhiyun extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1180*4882a593Smuzhiyun extern void blk_set_default_limits(struct queue_limits *lim);
1181*4882a593Smuzhiyun extern void blk_set_stacking_limits(struct queue_limits *lim);
1182*4882a593Smuzhiyun extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1183*4882a593Smuzhiyun sector_t offset);
1184*4882a593Smuzhiyun extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1185*4882a593Smuzhiyun sector_t offset);
1186*4882a593Smuzhiyun extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1187*4882a593Smuzhiyun extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1188*4882a593Smuzhiyun extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1189*4882a593Smuzhiyun extern void blk_queue_dma_alignment(struct request_queue *, int);
1190*4882a593Smuzhiyun extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1191*4882a593Smuzhiyun extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1192*4882a593Smuzhiyun extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1193*4882a593Smuzhiyun extern void blk_queue_required_elevator_features(struct request_queue *q,
1194*4882a593Smuzhiyun unsigned int features);
1195*4882a593Smuzhiyun extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
1196*4882a593Smuzhiyun struct device *dev);
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun /*
1199*4882a593Smuzhiyun * Number of physical segments as sent to the device.
1200*4882a593Smuzhiyun *
1201*4882a593Smuzhiyun * Normally this is the number of discontiguous data segments sent by the
1202*4882a593Smuzhiyun  * submitter.  But for a data-less command like discard we might have no
1203*4882a593Smuzhiyun  * actual data segments submitted, and the driver might have to add its
1204*4882a593Smuzhiyun  * own special payload.  In that case we still return 1 here so that this
1205*4882a593Smuzhiyun * special payload will be mapped.
1206*4882a593Smuzhiyun */
blk_rq_nr_phys_segments(struct request * rq)1207*4882a593Smuzhiyun static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1210*4882a593Smuzhiyun return 1;
1211*4882a593Smuzhiyun return rq->nr_phys_segments;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun /*
1215*4882a593Smuzhiyun * Number of discard segments (or ranges) the driver needs to fill in.
1216*4882a593Smuzhiyun * Each discard bio merged into a request is counted as one segment.
1217*4882a593Smuzhiyun */
blk_rq_nr_discard_segments(struct request * rq)1218*4882a593Smuzhiyun static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun return max_t(unsigned short, rq->nr_phys_segments, 1);
1221*4882a593Smuzhiyun }
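/*
 * Sketch of how a multi-range discard is typically consumed: a driver
 * advertising max_discard_segments > 1 sizes its range array from
 * blk_rq_nr_discard_segments() and then walks the bios:
 *
 *	unsigned short nr_ranges = blk_rq_nr_discard_segments(rq);
 *	struct bio *bio;
 *
 *	// allocate nr_ranges descriptors, then:
 *	__rq_for_each_bio(bio, rq)
 *		fill_one_range(bio);	// hypothetical helper
 */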
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1224*4882a593Smuzhiyun struct scatterlist *sglist, struct scatterlist **last_sg);
blk_rq_map_sg(struct request_queue * q,struct request * rq,struct scatterlist * sglist)1225*4882a593Smuzhiyun static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1226*4882a593Smuzhiyun struct scatterlist *sglist)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun struct scatterlist *last_sg = NULL;
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1231*4882a593Smuzhiyun }
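/*
 * Typical scatter/gather mapping sequence in a request-based driver
 * (sketch; dma_map_sg() lives in <linux/dma-mapping.h>):
 *
 *	struct scatterlist *sgl;	// blk_rq_nr_phys_segments(rq) entries
 *	int nents;
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, rq_dma_dir(rq));
 */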
1232*4882a593Smuzhiyun extern void blk_dump_rq_flags(struct request *, char *);
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun bool __must_check blk_get_queue(struct request_queue *);
1235*4882a593Smuzhiyun struct request_queue *blk_alloc_queue(int node_id);
1236*4882a593Smuzhiyun extern void blk_put_queue(struct request_queue *);
1237*4882a593Smuzhiyun extern void blk_set_queue_dying(struct request_queue *);
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun #ifdef CONFIG_BLOCK
1240*4882a593Smuzhiyun /*
1241*4882a593Smuzhiyun * blk_plug permits building a queue of related requests by holding the I/O
1242*4882a593Smuzhiyun * fragments for a short period. This allows merging of sequential requests
1243*4882a593Smuzhiyun  * into a single larger request. As the requests are moved from a per-task list
1244*4882a593Smuzhiyun  * to the device's request_queue in a batch, this improves scalability by
1245*4882a593Smuzhiyun  * reducing contention on the request_queue lock.
1246*4882a593Smuzhiyun *
1247*4882a593Smuzhiyun * It is ok not to disable preemption when adding the request to the plug list
1248*4882a593Smuzhiyun  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1249*4882a593Smuzhiyun * the plug list when the task sleeps by itself. For details, please see
1250*4882a593Smuzhiyun * schedule() where blk_schedule_flush_plug() is called.
1251*4882a593Smuzhiyun */
1252*4882a593Smuzhiyun struct blk_plug {
1253*4882a593Smuzhiyun struct list_head mq_list; /* blk-mq requests */
1254*4882a593Smuzhiyun struct list_head cb_list; /* md requires an unplug callback */
1255*4882a593Smuzhiyun unsigned short rq_count;
1256*4882a593Smuzhiyun bool multiple_queues;
1257*4882a593Smuzhiyun bool nowait;
1258*4882a593Smuzhiyun };
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun struct blk_plug_cb;
1261*4882a593Smuzhiyun typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1262*4882a593Smuzhiyun struct blk_plug_cb {
1263*4882a593Smuzhiyun struct list_head list;
1264*4882a593Smuzhiyun blk_plug_cb_fn callback;
1265*4882a593Smuzhiyun void *data;
1266*4882a593Smuzhiyun };
1267*4882a593Smuzhiyun extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1268*4882a593Smuzhiyun void *data, int size);
1269*4882a593Smuzhiyun extern void blk_start_plug(struct blk_plug *);
1270*4882a593Smuzhiyun extern void blk_finish_plug(struct blk_plug *);
1271*4882a593Smuzhiyun extern void blk_flush_plug_list(struct blk_plug *, bool);
1272*4882a593Smuzhiyun
blk_flush_plug(struct task_struct * tsk)1273*4882a593Smuzhiyun static inline void blk_flush_plug(struct task_struct *tsk)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun struct blk_plug *plug = tsk->plug;
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun if (plug)
1278*4882a593Smuzhiyun blk_flush_plug_list(plug, false);
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun
blk_schedule_flush_plug(struct task_struct * tsk)1281*4882a593Smuzhiyun static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun struct blk_plug *plug = tsk->plug;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun if (plug)
1286*4882a593Smuzhiyun blk_flush_plug_list(plug, true);
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun
blk_needs_flush_plug(struct task_struct * tsk)1289*4882a593Smuzhiyun static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun struct blk_plug *plug = tsk->plug;
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun return plug &&
1294*4882a593Smuzhiyun (!list_empty(&plug->mq_list) ||
1295*4882a593Smuzhiyun !list_empty(&plug->cb_list));
1296*4882a593Smuzhiyun }
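/*
 * Typical plugging pattern in a submitter (sketch): batch a burst of bios
 * so they can be merged and handed to the device together:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (have_more_bios())		// hypothetical condition
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 */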
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun int blkdev_issue_flush(struct block_device *, gfp_t);
1299*4882a593Smuzhiyun long nr_blockdev_pages(void);
1300*4882a593Smuzhiyun #else /* CONFIG_BLOCK */
1301*4882a593Smuzhiyun struct blk_plug {
1302*4882a593Smuzhiyun };
1303*4882a593Smuzhiyun
blk_start_plug(struct blk_plug * plug)1304*4882a593Smuzhiyun static inline void blk_start_plug(struct blk_plug *plug)
1305*4882a593Smuzhiyun {
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun
blk_finish_plug(struct blk_plug * plug)1308*4882a593Smuzhiyun static inline void blk_finish_plug(struct blk_plug *plug)
1309*4882a593Smuzhiyun {
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun
blk_flush_plug(struct task_struct * task)1312*4882a593Smuzhiyun static inline void blk_flush_plug(struct task_struct *task)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun
blk_schedule_flush_plug(struct task_struct * task)1316*4882a593Smuzhiyun static inline void blk_schedule_flush_plug(struct task_struct *task)
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun
blk_needs_flush_plug(struct task_struct * tsk)1321*4882a593Smuzhiyun static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun return false;
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun
blkdev_issue_flush(struct block_device * bdev,gfp_t gfp_mask)1326*4882a593Smuzhiyun static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
1327*4882a593Smuzhiyun {
1328*4882a593Smuzhiyun return 0;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun
nr_blockdev_pages(void)1331*4882a593Smuzhiyun static inline long nr_blockdev_pages(void)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun return 0;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun #endif /* CONFIG_BLOCK */
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun extern void blk_io_schedule(void);
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1340*4882a593Smuzhiyun sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1345*4882a593Smuzhiyun sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1346*4882a593Smuzhiyun extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1347*4882a593Smuzhiyun sector_t nr_sects, gfp_t gfp_mask, int flags,
1348*4882a593Smuzhiyun struct bio **biop);
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
1351*4882a593Smuzhiyun #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1354*4882a593Smuzhiyun sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1355*4882a593Smuzhiyun unsigned flags);
1356*4882a593Smuzhiyun extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1357*4882a593Smuzhiyun sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
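/*
 * Example (sketch) of discarding vs. zeroing a range of a block device:
 *
 *	// best-effort space reclaim; data in the range becomes undefined
 *	blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
 *
 *	// guarantee zeroes on read-back; with BLKDEV_ZERO_NOFALLBACK the
 *	// call fails rather than falling back to writing zero pages
 *	blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *			     BLKDEV_ZERO_NOFALLBACK);
 */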
1358*4882a593Smuzhiyun
sb_issue_discard(struct super_block * sb,sector_t block,sector_t nr_blocks,gfp_t gfp_mask,unsigned long flags)1359*4882a593Smuzhiyun static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1360*4882a593Smuzhiyun sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1361*4882a593Smuzhiyun {
1362*4882a593Smuzhiyun return blkdev_issue_discard(sb->s_bdev,
1363*4882a593Smuzhiyun block << (sb->s_blocksize_bits -
1364*4882a593Smuzhiyun SECTOR_SHIFT),
1365*4882a593Smuzhiyun nr_blocks << (sb->s_blocksize_bits -
1366*4882a593Smuzhiyun SECTOR_SHIFT),
1367*4882a593Smuzhiyun gfp_mask, flags);
1368*4882a593Smuzhiyun }
sb_issue_zeroout(struct super_block * sb,sector_t block,sector_t nr_blocks,gfp_t gfp_mask)1369*4882a593Smuzhiyun static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1370*4882a593Smuzhiyun sector_t nr_blocks, gfp_t gfp_mask)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun return blkdev_issue_zeroout(sb->s_bdev,
1373*4882a593Smuzhiyun block << (sb->s_blocksize_bits -
1374*4882a593Smuzhiyun SECTOR_SHIFT),
1375*4882a593Smuzhiyun nr_blocks << (sb->s_blocksize_bits -
1376*4882a593Smuzhiyun SECTOR_SHIFT),
1377*4882a593Smuzhiyun gfp_mask, 0);
1378*4882a593Smuzhiyun }
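/*
 * The shifts above convert filesystem blocks to 512-byte sectors.  For a
 * 4 KiB block size s_blocksize_bits is 12, so the shift is
 * 12 - SECTOR_SHIFT = 3 and filesystem block 100 maps to sector
 * 100 << 3 = 800 (nr_blocks scales the same way).
 */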
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
1381*4882a593Smuzhiyun
bdev_is_partition(struct block_device * bdev)1382*4882a593Smuzhiyun static inline bool bdev_is_partition(struct block_device *bdev)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun return bdev->bd_partno;
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun enum blk_default_limits {
1388*4882a593Smuzhiyun BLK_MAX_SEGMENTS = 128,
1389*4882a593Smuzhiyun BLK_SAFE_MAX_SECTORS = 255,
1390*4882a593Smuzhiyun BLK_DEF_MAX_SECTORS = 2560,
1391*4882a593Smuzhiyun BLK_MAX_SEGMENT_SIZE = 65536,
1392*4882a593Smuzhiyun BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1393*4882a593Smuzhiyun };
1394*4882a593Smuzhiyun
queue_segment_boundary(const struct request_queue * q)1395*4882a593Smuzhiyun static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun return q->limits.seg_boundary_mask;
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun
queue_virt_boundary(const struct request_queue * q)1400*4882a593Smuzhiyun static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1401*4882a593Smuzhiyun {
1402*4882a593Smuzhiyun return q->limits.virt_boundary_mask;
1403*4882a593Smuzhiyun }
1404*4882a593Smuzhiyun
queue_max_sectors(const struct request_queue * q)1405*4882a593Smuzhiyun static inline unsigned int queue_max_sectors(const struct request_queue *q)
1406*4882a593Smuzhiyun {
1407*4882a593Smuzhiyun return q->limits.max_sectors;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun
queue_max_hw_sectors(const struct request_queue * q)1410*4882a593Smuzhiyun static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun return q->limits.max_hw_sectors;
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun
queue_max_segments(const struct request_queue * q)1415*4882a593Smuzhiyun static inline unsigned short queue_max_segments(const struct request_queue *q)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun return q->limits.max_segments;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun
queue_max_discard_segments(const struct request_queue * q)1420*4882a593Smuzhiyun static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun return q->limits.max_discard_segments;
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun
queue_max_segment_size(const struct request_queue * q)1425*4882a593Smuzhiyun static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun return q->limits.max_segment_size;
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
queue_max_zone_append_sectors(const struct request_queue * q)1430*4882a593Smuzhiyun static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun const struct queue_limits *l = &q->limits;
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun return min(l->max_zone_append_sectors, l->max_sectors);
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun
queue_logical_block_size(const struct request_queue * q)1438*4882a593Smuzhiyun static inline unsigned queue_logical_block_size(const struct request_queue *q)
1439*4882a593Smuzhiyun {
1440*4882a593Smuzhiyun int retval = 512;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun if (q && q->limits.logical_block_size)
1443*4882a593Smuzhiyun retval = q->limits.logical_block_size;
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun return retval;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
bdev_logical_block_size(struct block_device * bdev)1448*4882a593Smuzhiyun static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun return queue_logical_block_size(bdev_get_queue(bdev));
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun
queue_physical_block_size(const struct request_queue * q)1453*4882a593Smuzhiyun static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun return q->limits.physical_block_size;
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun
bdev_physical_block_size(struct block_device * bdev)1458*4882a593Smuzhiyun static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1459*4882a593Smuzhiyun {
1460*4882a593Smuzhiyun return queue_physical_block_size(bdev_get_queue(bdev));
1461*4882a593Smuzhiyun }
1462*4882a593Smuzhiyun
queue_io_min(const struct request_queue * q)1463*4882a593Smuzhiyun static inline unsigned int queue_io_min(const struct request_queue *q)
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun return q->limits.io_min;
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun
bdev_io_min(struct block_device * bdev)1468*4882a593Smuzhiyun static inline int bdev_io_min(struct block_device *bdev)
1469*4882a593Smuzhiyun {
1470*4882a593Smuzhiyun return queue_io_min(bdev_get_queue(bdev));
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun
queue_io_opt(const struct request_queue * q)1473*4882a593Smuzhiyun static inline unsigned int queue_io_opt(const struct request_queue *q)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun return q->limits.io_opt;
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun
bdev_io_opt(struct block_device * bdev)1478*4882a593Smuzhiyun static inline int bdev_io_opt(struct block_device *bdev)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun return queue_io_opt(bdev_get_queue(bdev));
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun
queue_alignment_offset(const struct request_queue * q)1483*4882a593Smuzhiyun static inline int queue_alignment_offset(const struct request_queue *q)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun if (q->limits.misaligned)
1486*4882a593Smuzhiyun return -1;
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun return q->limits.alignment_offset;
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun
queue_limit_alignment_offset(struct queue_limits * lim,sector_t sector)1491*4882a593Smuzhiyun static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1494*4882a593Smuzhiyun unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1495*4882a593Smuzhiyun << SECTOR_SHIFT;
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun return (granularity + lim->alignment_offset - alignment) % granularity;
1498*4882a593Smuzhiyun }
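/*
 * Worked example: physical_block_size = 4096, io_min = 4096 and
 * alignment_offset = 0 give a granularity of 4096 bytes (8 sectors).
 * For sector 63 the in-granule offset is 63 % 8 = 7 sectors = 3584 bytes,
 * so the helper returns (4096 + 0 - 3584) % 4096 = 512: the start would
 * have to move 512 bytes further to be naturally aligned.
 */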
1499*4882a593Smuzhiyun
bdev_alignment_offset(struct block_device * bdev)1500*4882a593Smuzhiyun static inline int bdev_alignment_offset(struct block_device *bdev)
1501*4882a593Smuzhiyun {
1502*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun if (q->limits.misaligned)
1505*4882a593Smuzhiyun return -1;
1506*4882a593Smuzhiyun if (bdev_is_partition(bdev))
1507*4882a593Smuzhiyun return queue_limit_alignment_offset(&q->limits,
1508*4882a593Smuzhiyun bdev->bd_part->start_sect);
1509*4882a593Smuzhiyun return q->limits.alignment_offset;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun
queue_discard_alignment(const struct request_queue * q)1512*4882a593Smuzhiyun static inline int queue_discard_alignment(const struct request_queue *q)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun if (q->limits.discard_misaligned)
1515*4882a593Smuzhiyun return -1;
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun return q->limits.discard_alignment;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun
queue_limit_discard_alignment(struct queue_limits * lim,sector_t sector)1520*4882a593Smuzhiyun static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun unsigned int alignment, granularity, offset;
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun if (!lim->max_discard_sectors)
1525*4882a593Smuzhiyun return 0;
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun /* Why are these in bytes, not sectors? */
1528*4882a593Smuzhiyun alignment = lim->discard_alignment >> SECTOR_SHIFT;
1529*4882a593Smuzhiyun granularity = lim->discard_granularity >> SECTOR_SHIFT;
1530*4882a593Smuzhiyun if (!granularity)
1531*4882a593Smuzhiyun return 0;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun /* Offset of the partition start in 'granularity' sectors */
1534*4882a593Smuzhiyun offset = sector_div(sector, granularity);
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1537*4882a593Smuzhiyun offset = (granularity + alignment - offset) % granularity;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun /* Turn it back into bytes, gaah */
1540*4882a593Smuzhiyun return offset << SECTOR_SHIFT;
1541*4882a593Smuzhiyun }
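/*
 * Worked example: discard_granularity = 1 MiB (2048 sectors) and
 * discard_alignment = 0.  For a partition starting at sector 3072 the
 * in-granule offset is 3072 % 2048 = 1024 sectors, so the helper returns
 * (2048 + 0 - 1024) % 2048 = 1024 sectors, converted back to 512 KiB in
 * bytes.
 */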
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun /*
1544*4882a593Smuzhiyun * Two cases of handling DISCARD merge:
1545*4882a593Smuzhiyun  * If max_discard_segments > 1, the driver treats every bio
1546*4882a593Smuzhiyun  * as a range and sends them to the controller together. The ranges
1547*4882a593Smuzhiyun  * need not be contiguous.
1548*4882a593Smuzhiyun  * Otherwise, the bios/requests are handled the same as
1549*4882a593Smuzhiyun  * others, which must be contiguous.
1550*4882a593Smuzhiyun */
blk_discard_mergable(struct request * req)1551*4882a593Smuzhiyun static inline bool blk_discard_mergable(struct request *req)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun if (req_op(req) == REQ_OP_DISCARD &&
1554*4882a593Smuzhiyun queue_max_discard_segments(req->q) > 1)
1555*4882a593Smuzhiyun return true;
1556*4882a593Smuzhiyun return false;
1557*4882a593Smuzhiyun }
1558*4882a593Smuzhiyun
bdev_discard_alignment(struct block_device * bdev)1559*4882a593Smuzhiyun static inline int bdev_discard_alignment(struct block_device *bdev)
1560*4882a593Smuzhiyun {
1561*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun if (bdev_is_partition(bdev))
1564*4882a593Smuzhiyun return queue_limit_discard_alignment(&q->limits,
1565*4882a593Smuzhiyun bdev->bd_part->start_sect);
1566*4882a593Smuzhiyun return q->limits.discard_alignment;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun
bdev_write_same(struct block_device * bdev)1569*4882a593Smuzhiyun static inline unsigned int bdev_write_same(struct block_device *bdev)
1570*4882a593Smuzhiyun {
1571*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun if (q)
1574*4882a593Smuzhiyun return q->limits.max_write_same_sectors;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun return 0;
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun
bdev_write_zeroes_sectors(struct block_device * bdev)1579*4882a593Smuzhiyun static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1580*4882a593Smuzhiyun {
1581*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun if (q)
1584*4882a593Smuzhiyun return q->limits.max_write_zeroes_sectors;
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun return 0;
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun
bdev_zoned_model(struct block_device * bdev)1589*4882a593Smuzhiyun static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun if (q)
1594*4882a593Smuzhiyun return blk_queue_zoned_model(q);
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun return BLK_ZONED_NONE;
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun
bdev_is_zoned(struct block_device * bdev)1599*4882a593Smuzhiyun static inline bool bdev_is_zoned(struct block_device *bdev)
1600*4882a593Smuzhiyun {
1601*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun if (q)
1604*4882a593Smuzhiyun return blk_queue_is_zoned(q);
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun return false;
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun
bdev_zone_sectors(struct block_device * bdev)1609*4882a593Smuzhiyun static inline sector_t bdev_zone_sectors(struct block_device *bdev)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun if (q)
1614*4882a593Smuzhiyun return blk_queue_zone_sectors(q);
1615*4882a593Smuzhiyun return 0;
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun
bdev_max_open_zones(struct block_device * bdev)1618*4882a593Smuzhiyun static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun if (q)
1623*4882a593Smuzhiyun return queue_max_open_zones(q);
1624*4882a593Smuzhiyun return 0;
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun
bdev_max_active_zones(struct block_device * bdev)1627*4882a593Smuzhiyun static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
1628*4882a593Smuzhiyun {
1629*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun if (q)
1632*4882a593Smuzhiyun return queue_max_active_zones(q);
1633*4882a593Smuzhiyun return 0;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun
queue_dma_alignment(const struct request_queue * q)1636*4882a593Smuzhiyun static inline int queue_dma_alignment(const struct request_queue *q)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun return q ? q->dma_alignment : 511;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun
blk_rq_aligned(struct request_queue * q,unsigned long addr,unsigned int len)1641*4882a593Smuzhiyun static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1642*4882a593Smuzhiyun unsigned int len)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1645*4882a593Smuzhiyun return !(addr & alignment) && !(len & alignment);
1646*4882a593Smuzhiyun }
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun /* assumes size > 256 */
blksize_bits(unsigned int size)1649*4882a593Smuzhiyun static inline unsigned int blksize_bits(unsigned int size)
1650*4882a593Smuzhiyun {
1651*4882a593Smuzhiyun unsigned int bits = 8;
1652*4882a593Smuzhiyun do {
1653*4882a593Smuzhiyun bits++;
1654*4882a593Smuzhiyun size >>= 1;
1655*4882a593Smuzhiyun } while (size > 256);
1656*4882a593Smuzhiyun return bits;
1657*4882a593Smuzhiyun }
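/*
 * Worked example: blksize_bits(4096) starts at bits = 8 and halves
 * 4096 -> 2048 -> 1024 -> 512 -> 256, incrementing bits on each step, so
 * it returns 12 (1 << 12 == 4096); blksize_bits(512) returns 9.
 */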
1658*4882a593Smuzhiyun
block_size(struct block_device * bdev)1659*4882a593Smuzhiyun static inline unsigned int block_size(struct block_device *bdev)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun return 1 << bdev->bd_inode->i_blkbits;
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun int kblockd_schedule_work(struct work_struct *work);
1665*4882a593Smuzhiyun int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1668*4882a593Smuzhiyun MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1669*4882a593Smuzhiyun #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1670*4882a593Smuzhiyun MODULE_ALIAS("block-major-" __stringify(major) "-*")
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun #if defined(CONFIG_BLK_DEV_INTEGRITY)
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyun enum blk_integrity_flags {
1675*4882a593Smuzhiyun BLK_INTEGRITY_VERIFY = 1 << 0,
1676*4882a593Smuzhiyun BLK_INTEGRITY_GENERATE = 1 << 1,
1677*4882a593Smuzhiyun BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
1678*4882a593Smuzhiyun BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
1679*4882a593Smuzhiyun };
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun struct blk_integrity_iter {
1682*4882a593Smuzhiyun void *prot_buf;
1683*4882a593Smuzhiyun void *data_buf;
1684*4882a593Smuzhiyun sector_t seed;
1685*4882a593Smuzhiyun unsigned int data_size;
1686*4882a593Smuzhiyun unsigned short interval;
1687*4882a593Smuzhiyun const char *disk_name;
1688*4882a593Smuzhiyun };
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
1691*4882a593Smuzhiyun typedef void (integrity_prepare_fn) (struct request *);
1692*4882a593Smuzhiyun typedef void (integrity_complete_fn) (struct request *, unsigned int);
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun struct blk_integrity_profile {
1695*4882a593Smuzhiyun integrity_processing_fn *generate_fn;
1696*4882a593Smuzhiyun integrity_processing_fn *verify_fn;
1697*4882a593Smuzhiyun integrity_prepare_fn *prepare_fn;
1698*4882a593Smuzhiyun integrity_complete_fn *complete_fn;
1699*4882a593Smuzhiyun const char *name;
1700*4882a593Smuzhiyun };
1701*4882a593Smuzhiyun
1702*4882a593Smuzhiyun extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1703*4882a593Smuzhiyun extern void blk_integrity_unregister(struct gendisk *);
1704*4882a593Smuzhiyun extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1705*4882a593Smuzhiyun extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1706*4882a593Smuzhiyun struct scatterlist *);
1707*4882a593Smuzhiyun extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1708*4882a593Smuzhiyun
blk_get_integrity(struct gendisk * disk)1709*4882a593Smuzhiyun static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun struct blk_integrity *bi = &disk->queue->integrity;
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun if (!bi->profile)
1714*4882a593Smuzhiyun return NULL;
1715*4882a593Smuzhiyun
1716*4882a593Smuzhiyun return bi;
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun static inline
bdev_get_integrity(struct block_device * bdev)1720*4882a593Smuzhiyun struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1721*4882a593Smuzhiyun {
1722*4882a593Smuzhiyun return blk_get_integrity(bdev->bd_disk);
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun static inline bool
blk_integrity_queue_supports_integrity(struct request_queue * q)1726*4882a593Smuzhiyun blk_integrity_queue_supports_integrity(struct request_queue *q)
1727*4882a593Smuzhiyun {
1728*4882a593Smuzhiyun return q->integrity.profile;
1729*4882a593Smuzhiyun }
1730*4882a593Smuzhiyun
blk_integrity_rq(struct request * rq)1731*4882a593Smuzhiyun static inline bool blk_integrity_rq(struct request *rq)
1732*4882a593Smuzhiyun {
1733*4882a593Smuzhiyun return rq->cmd_flags & REQ_INTEGRITY;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun
blk_queue_max_integrity_segments(struct request_queue * q,unsigned int segs)1736*4882a593Smuzhiyun static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1737*4882a593Smuzhiyun unsigned int segs)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun q->limits.max_integrity_segments = segs;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun static inline unsigned short
queue_max_integrity_segments(const struct request_queue * q)1743*4882a593Smuzhiyun queue_max_integrity_segments(const struct request_queue *q)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun return q->limits.max_integrity_segments;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun /**
1749*4882a593Smuzhiyun * bio_integrity_intervals - Return number of integrity intervals for a bio
1750*4882a593Smuzhiyun * @bi: blk_integrity profile for device
1751*4882a593Smuzhiyun * @sectors: Size of the bio in 512-byte sectors
1752*4882a593Smuzhiyun *
1753*4882a593Smuzhiyun  * Description: The block layer calculates everything in 512-byte
1754*4882a593Smuzhiyun  * sectors, but integrity metadata is done in terms of the data integrity
1755*4882a593Smuzhiyun * interval size of the storage device. Convert the block layer sectors
1756*4882a593Smuzhiyun * to the appropriate number of integrity intervals.
1757*4882a593Smuzhiyun */
bio_integrity_intervals(struct blk_integrity * bi,unsigned int sectors)1758*4882a593Smuzhiyun static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1759*4882a593Smuzhiyun unsigned int sectors)
1760*4882a593Smuzhiyun {
1761*4882a593Smuzhiyun return sectors >> (bi->interval_exp - 9);
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun
bio_integrity_bytes(struct blk_integrity * bi,unsigned int sectors)1764*4882a593Smuzhiyun static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1765*4882a593Smuzhiyun unsigned int sectors)
1766*4882a593Smuzhiyun {
1767*4882a593Smuzhiyun return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
1768*4882a593Smuzhiyun }
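/*
 * Worked example: a device with a 4096-byte protection interval has
 * interval_exp = 12, so a 32-sector (16 KiB) bio spans
 * 32 >> (12 - 9) = 4 intervals; with an 8-byte tuple_size that amounts to
 * 4 * 8 = 32 bytes of integrity metadata.
 */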
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun /*
1771*4882a593Smuzhiyun * Return the first bvec that contains integrity data. Only drivers that are
1772*4882a593Smuzhiyun * limited to a single integrity segment should use this helper.
1773*4882a593Smuzhiyun */
rq_integrity_vec(struct request * rq)1774*4882a593Smuzhiyun static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1775*4882a593Smuzhiyun {
1776*4882a593Smuzhiyun if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
1777*4882a593Smuzhiyun return NULL;
1778*4882a593Smuzhiyun return rq->bio->bi_integrity->bip_vec;
1779*4882a593Smuzhiyun }
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun #else /* CONFIG_BLK_DEV_INTEGRITY */
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun struct bio;
1784*4882a593Smuzhiyun struct block_device;
1785*4882a593Smuzhiyun struct gendisk;
1786*4882a593Smuzhiyun struct blk_integrity;
1787*4882a593Smuzhiyun
blk_integrity_rq(struct request * rq)1788*4882a593Smuzhiyun static inline int blk_integrity_rq(struct request *rq)
1789*4882a593Smuzhiyun {
1790*4882a593Smuzhiyun return 0;
1791*4882a593Smuzhiyun }
blk_rq_count_integrity_sg(struct request_queue * q,struct bio * b)1792*4882a593Smuzhiyun static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1793*4882a593Smuzhiyun struct bio *b)
1794*4882a593Smuzhiyun {
1795*4882a593Smuzhiyun return 0;
1796*4882a593Smuzhiyun }
blk_rq_map_integrity_sg(struct request_queue * q,struct bio * b,struct scatterlist * s)1797*4882a593Smuzhiyun static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1798*4882a593Smuzhiyun struct bio *b,
1799*4882a593Smuzhiyun struct scatterlist *s)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun return 0;
1802*4882a593Smuzhiyun }
bdev_get_integrity(struct block_device * b)1803*4882a593Smuzhiyun static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1804*4882a593Smuzhiyun {
1805*4882a593Smuzhiyun return NULL;
1806*4882a593Smuzhiyun }
blk_get_integrity(struct gendisk * disk)1807*4882a593Smuzhiyun static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1808*4882a593Smuzhiyun {
1809*4882a593Smuzhiyun return NULL;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun static inline bool
blk_integrity_queue_supports_integrity(struct request_queue * q)1812*4882a593Smuzhiyun blk_integrity_queue_supports_integrity(struct request_queue *q)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun return false;
1815*4882a593Smuzhiyun }
blk_integrity_compare(struct gendisk * a,struct gendisk * b)1816*4882a593Smuzhiyun static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1817*4882a593Smuzhiyun {
1818*4882a593Smuzhiyun return 0;
1819*4882a593Smuzhiyun }
blk_integrity_register(struct gendisk * d,struct blk_integrity * b)1820*4882a593Smuzhiyun static inline void blk_integrity_register(struct gendisk *d,
1821*4882a593Smuzhiyun struct blk_integrity *b)
1822*4882a593Smuzhiyun {
1823*4882a593Smuzhiyun }
blk_integrity_unregister(struct gendisk * d)1824*4882a593Smuzhiyun static inline void blk_integrity_unregister(struct gendisk *d)
1825*4882a593Smuzhiyun {
1826*4882a593Smuzhiyun }
blk_queue_max_integrity_segments(struct request_queue * q,unsigned int segs)1827*4882a593Smuzhiyun static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1828*4882a593Smuzhiyun unsigned int segs)
1829*4882a593Smuzhiyun {
1830*4882a593Smuzhiyun }
queue_max_integrity_segments(const struct request_queue * q)1831*4882a593Smuzhiyun static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun return 0;
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun
bio_integrity_intervals(struct blk_integrity * bi,unsigned int sectors)1836*4882a593Smuzhiyun static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1837*4882a593Smuzhiyun unsigned int sectors)
1838*4882a593Smuzhiyun {
1839*4882a593Smuzhiyun return 0;
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun
bio_integrity_bytes(struct blk_integrity * bi,unsigned int sectors)1842*4882a593Smuzhiyun static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1843*4882a593Smuzhiyun unsigned int sectors)
1844*4882a593Smuzhiyun {
1845*4882a593Smuzhiyun return 0;
1846*4882a593Smuzhiyun }
1847*4882a593Smuzhiyun
rq_integrity_vec(struct request * rq)1848*4882a593Smuzhiyun static inline struct bio_vec *rq_integrity_vec(struct request *rq)
1849*4882a593Smuzhiyun {
1850*4882a593Smuzhiyun return NULL;
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_INTEGRITY */
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
1858*4882a593Smuzhiyun
1859*4882a593Smuzhiyun void blk_ksm_unregister(struct request_queue *q);
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1862*4882a593Smuzhiyun
blk_ksm_register(struct blk_keyslot_manager * ksm,struct request_queue * q)1863*4882a593Smuzhiyun static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
1864*4882a593Smuzhiyun struct request_queue *q)
1865*4882a593Smuzhiyun {
1866*4882a593Smuzhiyun return true;
1867*4882a593Smuzhiyun }
1868*4882a593Smuzhiyun
blk_ksm_unregister(struct request_queue * q)1869*4882a593Smuzhiyun static inline void blk_ksm_unregister(struct request_queue *q) { }
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun struct block_device_operations {
1875*4882a593Smuzhiyun blk_qc_t (*submit_bio) (struct bio *bio);
1876*4882a593Smuzhiyun int (*open) (struct block_device *, fmode_t);
1877*4882a593Smuzhiyun void (*release) (struct gendisk *, fmode_t);
1878*4882a593Smuzhiyun int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
1879*4882a593Smuzhiyun int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1880*4882a593Smuzhiyun int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1881*4882a593Smuzhiyun unsigned int (*check_events) (struct gendisk *disk,
1882*4882a593Smuzhiyun unsigned int clearing);
1883*4882a593Smuzhiyun void (*unlock_native_capacity) (struct gendisk *);
1884*4882a593Smuzhiyun int (*revalidate_disk) (struct gendisk *);
1885*4882a593Smuzhiyun int (*getgeo)(struct block_device *, struct hd_geometry *);
1886*4882a593Smuzhiyun 	/* this callback is called with swap_lock and sometimes the page table lock held */
1887*4882a593Smuzhiyun void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1888*4882a593Smuzhiyun int (*report_zones)(struct gendisk *, sector_t sector,
1889*4882a593Smuzhiyun unsigned int nr_zones, report_zones_cb cb, void *data);
1890*4882a593Smuzhiyun char *(*devnode)(struct gendisk *disk, umode_t *mode);
1891*4882a593Smuzhiyun struct module *owner;
1892*4882a593Smuzhiyun const struct pr_ops *pr_ops;
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun ANDROID_KABI_RESERVE(1);
1895*4882a593Smuzhiyun ANDROID_KABI_RESERVE(2);
1896*4882a593Smuzhiyun ANDROID_OEM_DATA(1);
1897*4882a593Smuzhiyun };
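/*
 * Minimal sketch of a bio-based driver's ops table (names are
 * hypothetical); such a driver typically only needs .submit_bio and
 * .owner, the remaining callbacks are optional:
 *
 *	static blk_qc_t mydrv_submit_bio(struct bio *bio);
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= mydrv_submit_bio,
 *	};
 */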
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1900*4882a593Smuzhiyun extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
1901*4882a593Smuzhiyun unsigned int, unsigned long);
1902*4882a593Smuzhiyun #else
1903*4882a593Smuzhiyun #define blkdev_compat_ptr_ioctl NULL
1904*4882a593Smuzhiyun #endif
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1907*4882a593Smuzhiyun unsigned long);
1908*4882a593Smuzhiyun extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1909*4882a593Smuzhiyun extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1910*4882a593Smuzhiyun struct writeback_control *);
1911*4882a593Smuzhiyun
1912*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_ZONED
1913*4882a593Smuzhiyun bool blk_req_needs_zone_write_lock(struct request *rq);
1914*4882a593Smuzhiyun bool blk_req_zone_write_trylock(struct request *rq);
1915*4882a593Smuzhiyun void __blk_req_zone_write_lock(struct request *rq);
1916*4882a593Smuzhiyun void __blk_req_zone_write_unlock(struct request *rq);
1917*4882a593Smuzhiyun
blk_req_zone_write_lock(struct request * rq)1918*4882a593Smuzhiyun static inline void blk_req_zone_write_lock(struct request *rq)
1919*4882a593Smuzhiyun {
1920*4882a593Smuzhiyun if (blk_req_needs_zone_write_lock(rq))
1921*4882a593Smuzhiyun __blk_req_zone_write_lock(rq);
1922*4882a593Smuzhiyun }
1923*4882a593Smuzhiyun
blk_req_zone_write_unlock(struct request * rq)1924*4882a593Smuzhiyun static inline void blk_req_zone_write_unlock(struct request *rq)
1925*4882a593Smuzhiyun {
1926*4882a593Smuzhiyun if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1927*4882a593Smuzhiyun __blk_req_zone_write_unlock(rq);
1928*4882a593Smuzhiyun }
1929*4882a593Smuzhiyun
blk_req_zone_is_write_locked(struct request * rq)1930*4882a593Smuzhiyun static inline bool blk_req_zone_is_write_locked(struct request *rq)
1931*4882a593Smuzhiyun {
1932*4882a593Smuzhiyun return rq->q->seq_zones_wlock &&
1933*4882a593Smuzhiyun test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
1934*4882a593Smuzhiyun }
1935*4882a593Smuzhiyun
blk_req_can_dispatch_to_zone(struct request * rq)1936*4882a593Smuzhiyun static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1937*4882a593Smuzhiyun {
1938*4882a593Smuzhiyun if (!blk_req_needs_zone_write_lock(rq))
1939*4882a593Smuzhiyun return true;
1940*4882a593Smuzhiyun return !blk_req_zone_is_write_locked(rq);
1941*4882a593Smuzhiyun }
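/*
 * Typical zoned-dispatch pattern (sketch), e.g. in an I/O scheduler:
 * only dispatch a write to a sequential zone if that zone is not already
 * write-locked, and hold the lock for the lifetime of the request:
 *
 *	if (!blk_req_can_dispatch_to_zone(rq))
 *		return NULL;			// keep the request queued
 *	blk_req_zone_write_lock(rq);
 *	// ... dispatch; blk_req_zone_write_unlock(rq) runs on completion
 */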
1942*4882a593Smuzhiyun #else
blk_req_needs_zone_write_lock(struct request * rq)1943*4882a593Smuzhiyun static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1944*4882a593Smuzhiyun {
1945*4882a593Smuzhiyun return false;
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun
blk_req_zone_write_lock(struct request * rq)1948*4882a593Smuzhiyun static inline void blk_req_zone_write_lock(struct request *rq)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun }
1951*4882a593Smuzhiyun
blk_req_zone_write_unlock(struct request * rq)1952*4882a593Smuzhiyun static inline void blk_req_zone_write_unlock(struct request *rq)
1953*4882a593Smuzhiyun {
1954*4882a593Smuzhiyun }
blk_req_zone_is_write_locked(struct request * rq)1955*4882a593Smuzhiyun static inline bool blk_req_zone_is_write_locked(struct request *rq)
1956*4882a593Smuzhiyun {
1957*4882a593Smuzhiyun return false;
1958*4882a593Smuzhiyun }
1959*4882a593Smuzhiyun
blk_req_can_dispatch_to_zone(struct request * rq)1960*4882a593Smuzhiyun static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1961*4882a593Smuzhiyun {
1962*4882a593Smuzhiyun return true;
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_ZONED */
1965*4882a593Smuzhiyun
blk_wake_io_task(struct task_struct * waiter)1966*4882a593Smuzhiyun static inline void blk_wake_io_task(struct task_struct *waiter)
1967*4882a593Smuzhiyun {
1968*4882a593Smuzhiyun /*
1969*4882a593Smuzhiyun 	 * If we're polling, the task itself is doing the completions. In
1970*4882a593Smuzhiyun 	 * that case we don't need to signal a wakeup; it's enough to just
1971*4882a593Smuzhiyun 	 * mark the task as RUNNING.
1972*4882a593Smuzhiyun */
1973*4882a593Smuzhiyun if (waiter == current)
1974*4882a593Smuzhiyun __set_current_state(TASK_RUNNING);
1975*4882a593Smuzhiyun else
1976*4882a593Smuzhiyun wake_up_process(waiter);
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1980*4882a593Smuzhiyun unsigned int op);
1981*4882a593Smuzhiyun void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1982*4882a593Smuzhiyun unsigned long start_time);
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1985*4882a593Smuzhiyun struct bio *bio);
1986*4882a593Smuzhiyun void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1987*4882a593Smuzhiyun unsigned long start_time);
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun /**
1990*4882a593Smuzhiyun * bio_start_io_acct - start I/O accounting for bio based drivers
1991*4882a593Smuzhiyun * @bio: bio to start account for
1992*4882a593Smuzhiyun *
1993*4882a593Smuzhiyun * Returns the start time that should be passed back to bio_end_io_acct().
1994*4882a593Smuzhiyun */
bio_start_io_acct(struct bio * bio)1995*4882a593Smuzhiyun static inline unsigned long bio_start_io_acct(struct bio *bio)
1996*4882a593Smuzhiyun {
1997*4882a593Smuzhiyun return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun /**
2001*4882a593Smuzhiyun * bio_end_io_acct - end I/O accounting for bio based drivers
2002*4882a593Smuzhiyun * @bio: bio to end account for
2003*4882a593Smuzhiyun * @start: start time returned by bio_start_io_acct()
2004*4882a593Smuzhiyun */
bio_end_io_acct(struct bio * bio,unsigned long start_time)2005*4882a593Smuzhiyun static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
2008*4882a593Smuzhiyun }
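/*
 * Accounting sketch for a bio-based driver: record the start time when
 * the bio is accepted and report it when the bio completes:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	// ... perform the I/O ...
 *	bio_end_io_acct(bio, start);
 */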
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun int bdev_read_only(struct block_device *bdev);
2011*4882a593Smuzhiyun int set_blocksize(struct block_device *bdev, int size);
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun const char *bdevname(struct block_device *bdev, char *buffer);
2014*4882a593Smuzhiyun struct block_device *lookup_bdev(const char *);
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun void blkdev_show(struct seq_file *seqf, off_t offset);
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
2019*4882a593Smuzhiyun #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
2020*4882a593Smuzhiyun #ifdef CONFIG_BLOCK
2021*4882a593Smuzhiyun #define BLKDEV_MAJOR_MAX 512
2022*4882a593Smuzhiyun #else
2023*4882a593Smuzhiyun #define BLKDEV_MAJOR_MAX 0
2024*4882a593Smuzhiyun #endif
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
2027*4882a593Smuzhiyun void *holder);
2028*4882a593Smuzhiyun struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
2029*4882a593Smuzhiyun int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
2030*4882a593Smuzhiyun void *holder);
2031*4882a593Smuzhiyun void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
2032*4882a593Smuzhiyun void *holder);
2033*4882a593Smuzhiyun void blkdev_put(struct block_device *bdev, fmode_t mode);
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun struct block_device *I_BDEV(struct inode *inode);
2036*4882a593Smuzhiyun struct block_device *bdget_part(struct hd_struct *part);
2037*4882a593Smuzhiyun struct block_device *bdgrab(struct block_device *bdev);
2038*4882a593Smuzhiyun void bdput(struct block_device *);
2039*4882a593Smuzhiyun
2040*4882a593Smuzhiyun #ifdef CONFIG_BLOCK
2041*4882a593Smuzhiyun void invalidate_bdev(struct block_device *bdev);
2042*4882a593Smuzhiyun int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
2043*4882a593Smuzhiyun loff_t lend);
2044*4882a593Smuzhiyun int sync_blockdev(struct block_device *bdev);
2045*4882a593Smuzhiyun #else
invalidate_bdev(struct block_device * bdev)2046*4882a593Smuzhiyun static inline void invalidate_bdev(struct block_device *bdev)
2047*4882a593Smuzhiyun {
2048*4882a593Smuzhiyun }
truncate_bdev_range(struct block_device * bdev,fmode_t mode,loff_t lstart,loff_t lend)2049*4882a593Smuzhiyun static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
2050*4882a593Smuzhiyun loff_t lstart, loff_t lend)
2051*4882a593Smuzhiyun {
2052*4882a593Smuzhiyun return 0;
2053*4882a593Smuzhiyun }
sync_blockdev(struct block_device * bdev)2054*4882a593Smuzhiyun static inline int sync_blockdev(struct block_device *bdev)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun return 0;
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun #endif
2059*4882a593Smuzhiyun int fsync_bdev(struct block_device *bdev);
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun int freeze_bdev(struct block_device *bdev);
2062*4882a593Smuzhiyun int thaw_bdev(struct block_device *bdev);
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun #endif /* _LINUX_BLKDEV_H */