xref: /OK3568_Linux_fs/kernel/drivers/block/null_blk.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __BLK_NULL_BLK_H
3*4882a593Smuzhiyun #define __BLK_NULL_BLK_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #undef pr_fmt
6*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/blkdev.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/blk-mq.h>
11*4882a593Smuzhiyun #include <linux/hrtimer.h>
12*4882a593Smuzhiyun #include <linux/configfs.h>
13*4882a593Smuzhiyun #include <linux/badblocks.h>
14*4882a593Smuzhiyun #include <linux/fault-inject.h>
15*4882a593Smuzhiyun 
/*
 * Per-I/O command state, one per outstanding request.  Either @rq (blk-mq
 * queue mode) or @bio (bio queue mode) presumably identifies the I/O being
 * emulated — confirm against the queue_mode handling in the .c file.
 */
struct nullb_cmd {
	struct request *rq;	/* blk-mq request being serviced */
	struct bio *bio;	/* bio being serviced (bio-based mode) */
	unsigned int tag;	/* tag identifying this command within its queue */
	blk_status_t error;	/* completion status reported to the block layer */
	struct nullb_queue *nq;	/* queue this command belongs to */
	struct hrtimer timer;	/* delayed completion — presumably used for irqmode/completion_nsec; verify */
	bool fake_timeout;	/* NOTE(review): looks like a flag to suppress completion and fake a timeout — confirm */
};
25*4882a593Smuzhiyun 
/* Per-submission-queue state for one null block device. */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use tags — presumably sized by queue_depth */
	wait_queue_head_t wait;		/* waiters, likely for a free tag — TODO confirm */
	unsigned int queue_depth;	/* number of entries in @cmds */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* NOTE(review): purpose not visible here — confirm against requeue path */

	struct nullb_cmd *cmds;		/* preallocated array of command structures */
};
35*4882a593Smuzhiyun 
/*
 * Configuration and backing state of a null block device.  Exposed through
 * configfs via @item; most scalar fields below are the configfs-tunable
 * parameters, as their existing comments indicate.
 */
struct nullb_device {
	struct nullb *nullb;		/* runtime instance — presumably NULL until powered on; verify */
	struct config_item item;	/* configfs representation of this device */
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage — units not visible here, TODO confirm */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	/* Zoned-mode accounting (only meaningful when @zoned is set). */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	struct blk_zone *zones;		/* per-zone state array */
	sector_t zone_size_sects;	/* zone size in sectors — presumably derived from @zone_size */
	spinlock_t zone_lock;		/* protects zone state — scope vs @zone_locks unclear, verify */
	unsigned long *zone_locks;	/* NOTE(review): looks like a per-zone lock bitmap — confirm */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
};
77*4882a593Smuzhiyun 
/* Runtime state of one instantiated null block disk. */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was built from */
	struct list_head list;		/* list membership — presumably a global device list; verify */
	unsigned int index;		/* disk index — relation to dev->index not visible here */
	struct request_queue *q;	/* request queue backing the disk */
	struct gendisk *disk;		/* the exported block disk */
	struct blk_mq_tag_set *tag_set;		/* active tag set — may point at @__tag_set; verify */
	struct blk_mq_tag_set __tag_set;	/* embedded tag set storage */
	unsigned int queue_depth;	/* depth applied to each nullb_queue */
	atomic_long_t cur_bytes;	/* presumably the remaining budget for the mbps throttle — confirm */
	struct hrtimer bw_timer;	/* bandwidth-throttle timer (see dev->mbps) — verify */
	unsigned long cache_flush_pos;	/* NOTE(review): looks like a cursor into the cache radix tree — confirm */
	spinlock_t lock;		/* protects this structure — exact coverage not visible here */

	struct nullb_queue *queues;	/* array of @nr_queues submission queues */
	unsigned int nr_queues;		/* number of entries in @queues */
	char disk_name[DISK_NAME_LEN];	/* device name */
};
96*4882a593Smuzhiyun 
/*
 * Process one (non-zoned) command: perform @op on @nr_sectors sectors
 * starting at @sector, returning the status to complete the request with.
 */
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
			      enum req_opf op, sector_t sector,
			      unsigned int nr_sectors);
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_ZONED
/* Zoned-device support — real implementations live in a separate TU (verify). */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
/* report_zones handler — presumably wired into block_device_operations. */
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
/* Zoned counterpart of null_process_cmd(). */
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
				    enum req_opf op, sector_t sector,
				    sector_t nr_sectors);
/* Presumably clamps a read length to the valid data in a zone — confirm. */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
112*4882a593Smuzhiyun #else
null_init_zoned_dev(struct nullb_device * dev,struct request_queue * q)113*4882a593Smuzhiyun static inline int null_init_zoned_dev(struct nullb_device *dev,
114*4882a593Smuzhiyun 				      struct request_queue *q)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun 	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
117*4882a593Smuzhiyun 	return -EINVAL;
118*4882a593Smuzhiyun }
/* Zoned emulation is compiled out: no zoned device can be registered. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* Zoned emulation is compiled out: no zone state exists, nothing to free. */
static inline void null_free_zoned_dev(struct nullb_device *dev)
{
}
null_process_zoned_cmd(struct nullb_cmd * cmd,enum req_opf op,sector_t sector,sector_t nr_sectors)124*4882a593Smuzhiyun static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
125*4882a593Smuzhiyun 			enum req_opf op, sector_t sector, sector_t nr_sectors)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun 	return BLK_STS_NOTSUPP;
128*4882a593Smuzhiyun }
null_zone_valid_read_len(struct nullb * nullb,sector_t sector,unsigned int len)129*4882a593Smuzhiyun static inline size_t null_zone_valid_read_len(struct nullb *nullb,
130*4882a593Smuzhiyun 					      sector_t sector,
131*4882a593Smuzhiyun 					      unsigned int len)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	return len;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun #define null_report_zones	NULL
136*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */
138