/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELEVATOR_H
#define _LINUX_ELEVATOR_H

#include <linux/percpu.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>

#ifdef CONFIG_BLOCK

struct io_cq;
struct elevator_type;
#ifdef CONFIG_BLK_DEBUG_FS
struct blk_mq_debugfs_attr;
#endif

/*
 * Return values from elevator merger
 */
enum elv_merge {
	ELEVATOR_NO_MERGE	= 0,
	ELEVATOR_FRONT_MERGE	= 1,
	ELEVATOR_BACK_MERGE	= 2,
	ELEVATOR_DISCARD_MERGE	= 3,
};

struct blk_mq_alloc_data;
struct blk_mq_hw_ctx;

struct elevator_mq_ops {
	int (*init_sched)(struct request_queue *, struct elevator_type *);
	void (*exit_sched)(struct elevator_queue *);
	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
	void (*depth_updated)(struct blk_mq_hw_ctx *);

	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
	bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
	void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
	void (*prepare_request)(struct request *);
	void (*finish_request)(struct request *);
	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
	bool (*has_work)(struct blk_mq_hw_ctx *);
	void (*completed_request)(struct request *, u64);
	void (*requeue_request)(struct request *);
	struct request *(*former_request)(struct request_queue *, struct request *);
	struct request *(*next_request)(struct request_queue *, struct request *);
	void (*init_icq)(struct io_cq *);
	void (*exit_icq)(struct io_cq *);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
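
/*
 * A scheduler implements a subset of these hooks; unused hooks may be left
 * NULL.  Below is a hedged, hypothetical sketch (the my_sched_* names and
 * struct my_sched_data are illustrative only, not part of this API) showing
 * the shape of two representative hooks, with the scheduler's private state
 * fetched from q->elevator->elevator_data as schedulers conventionally do:
 *
 *	static bool my_sched_has_work(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct my_sched_data *sd = hctx->queue->elevator->elevator_data;
 *
 *		return !list_empty_careful(&sd->dispatch_list);
 *	}
 *
 *	static struct request *my_sched_dispatch(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct my_sched_data *sd = hctx->queue->elevator->elevator_data;
 *		struct request *rq;
 *
 *		spin_lock(&sd->lock);
 *		rq = list_first_entry_or_null(&sd->dispatch_list,
 *					      struct request, queuelist);
 *		if (rq)
 *			list_del_init(&rq->queuelist);
 *		spin_unlock(&sd->lock);
 *		return rq;
 *	}
 */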

#define ELV_NAME_MAX	(16)

struct elv_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct elevator_queue *, char *);
	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
};
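
/*
 * Schedulers typically expose their tunables as an array of these entries,
 * terminated with __ATTR_NULL, and point elevator_type->elevator_attrs at
 * it.  A hedged sketch using hypothetical my_expire_show()/my_expire_store()
 * helpers:
 *
 *	static struct elv_fs_entry my_sched_attrs[] = {
 *		__ATTR(expire_ms, 0644, my_expire_show, my_expire_store),
 *		__ATTR_NULL
 *	};
 */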

/*
 * identifies an elevator type, such as BFQ or mq-deadline
 */
struct elevator_type
{
	/* managed by elevator core */
	struct kmem_cache *icq_cache;

	/* fields provided by elevator implementation */
	struct elevator_mq_ops ops;

	size_t icq_size;	/* see iocontext.h */
	size_t icq_align;	/* ditto */
	struct elv_fs_entry *elevator_attrs;
	const char *elevator_name;
	const char *elevator_alias;
	const unsigned int elevator_features;
	struct module *elevator_owner;
#ifdef CONFIG_BLK_DEBUG_FS
	const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
	const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
#endif

	/* managed by elevator core */
	char icq_cache_name[ELV_NAME_MAX + 6];	/* elvname + "_io_cq" */
	struct list_head list;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};
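
/*
 * A scheduler describes itself with a statically defined elevator_type;
 * only the "fields provided by elevator implementation" are filled in, the
 * rest is managed by the elevator core.  Hypothetical sketch (my_* names
 * are illustrative, continuing the examples above):
 *
 *	static struct elevator_type my_sched = {
 *		.ops = {
 *			.init_sched		= my_init_sched,
 *			.exit_sched		= my_exit_sched,
 *			.insert_requests	= my_insert_requests,
 *			.dispatch_request	= my_sched_dispatch,
 *			.has_work		= my_sched_has_work,
 *		},
 *		.elevator_attrs	= my_sched_attrs,
 *		.elevator_name	= "my-sched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 */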

#define ELV_HASH_BITS 6

void elv_rqhash_del(struct request_queue *q, struct request *rq);
void elv_rqhash_add(struct request_queue *q, struct request *rq);
void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
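
/*
 * The hash indexes scheduler-owned requests by their end sector so a
 * back-merge candidate for a new bio can be found cheaply; elv_merge()
 * roughly does:
 *
 *	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 *	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 *		*req = __rq;
 *		return ELEVATOR_BACK_MERGE;
 *	}
 */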

/*
 * each queue has an elevator_queue associated with it
 */
struct elevator_queue
{
	struct elevator_type *type;
	void *elevator_data;
	struct kobject kobj;
	struct mutex sysfs_lock;
	unsigned int registered:1;
	DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};

/*
 * block elevator interface
 */
extern enum elv_merge elv_merge(struct request_queue *, struct request **,
		struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
			       struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
		enum elv_merge);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);

/*
 * io scheduler registration
 */
extern int elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
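
/*
 * A scheduler module normally registers its elevator_type from module init
 * and unregisters it on exit, e.g. (my_sched as sketched above):
 *
 *	static int __init my_sched_init(void)
 *	{
 *		return elv_register(&my_sched);
 *	}
 *
 *	static void __exit my_sched_exit(void)
 *	{
 *		elv_unregister(&my_sched);
 *	}
 *
 *	module_init(my_sched_init);
 *	module_exit(my_sched_exit);
 */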

/*
 * io scheduler sysfs switching
 */
extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
					struct elevator_type *);
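
/*
 * elevator_alloc() is what an ->init_sched() implementation uses to obtain
 * the elevator_queue it then publishes in q->elevator.  A hedged sketch of
 * the usual pattern (struct my_sched_data is hypothetical):
 *
 *	static int my_init_sched(struct request_queue *q, struct elevator_type *e)
 *	{
 *		struct elevator_queue *eq;
 *		struct my_sched_data *sd;
 *
 *		eq = elevator_alloc(q, e);
 *		if (!eq)
 *			return -ENOMEM;
 *
 *		sd = kzalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
 *		if (!sd) {
 *			kobject_put(&eq->kobj);
 *			return -ENOMEM;
 *		}
 *		eq->elevator_data = sd;
 *		q->elevator = eq;
 *		return 0;
 *	}
 */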

/*
 * Helper functions.
 */
extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);

/*
 * rb support functions.
 */
extern void elv_rb_add(struct rb_root *, struct request *);
extern void elv_rb_del(struct rb_root *, struct request *);
extern struct request *elv_rb_find(struct rb_root *, sector_t);
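
/*
 * The rb helpers keep a per-scheduler tree of requests sorted by start
 * sector, which makes front-merge lookups cheap.  A hedged sketch of a
 * ->request_merge() hook built on elv_rb_find() (sd->sort_list is a
 * hypothetical per-scheduler rb_root):
 *
 *	static int my_request_merge(struct request_queue *q, struct request **rq,
 *				    struct bio *bio)
 *	{
 *		struct my_sched_data *sd = q->elevator->elevator_data;
 *		struct request *__rq;
 *
 *		__rq = elv_rb_find(&sd->sort_list, bio_end_sector(bio));
 *		if (__rq && elv_bio_merge_ok(__rq, bio)) {
 *			*rq = __rq;
 *			return ELEVATOR_FRONT_MERGE;
 *		}
 *		return ELEVATOR_NO_MERGE;
 *	}
 */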

/*
 * Insertion selection
 */
#define ELEVATOR_INSERT_FRONT	1
#define ELEVATOR_INSERT_BACK	2
#define ELEVATOR_INSERT_SORT	3
#define ELEVATOR_INSERT_REQUEUE	4
#define ELEVATOR_INSERT_FLUSH	5
#define ELEVATOR_INSERT_SORT_MERGE	6

#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)

#define rq_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq)	list_del_init(&(rq)->queuelist)
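
/*
 * The FIFO helpers assume the scheduler threads requests through
 * rq->queuelist, oldest first, the way mq-deadline does, with the
 * scheduler-assigned deadline stored in rq->fifo_time.  Sketch of an
 * expiry check on a non-empty list (sd->fifo_list is hypothetical):
 *
 *	rq = rq_entry_fifo(sd->fifo_list.next);
 *	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) {
 *		rq_fifo_clear(rq);
 *		return rq;
 *	}
 */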

/*
 * Elevator features.
 */

/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
/* Supports scheduling on multiple hardware queues */
#define ELEVATOR_F_MQ_AWARE		(1U << 1)
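
/*
 * A scheduler advertises features in elevator_type->elevator_features and a
 * driver can demand them with blk_queue_required_elevator_features(); only
 * schedulers whose feature mask covers the queue's requirements can be
 * selected for that queue.  For example, mq-deadline sets
 *
 *	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
 *
 * which lets it be picked for host-managed zoned block devices.
 */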

#endif /* CONFIG_BLOCK */
#endif