#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#ifdef __GENKSYMS__
/*
 * ANDROID ABI HACK
 *
 * See the big comment in the linux/io_uring.h file for details. This
 * include is not needed for any real functionality, but must be here to
 * preserve the CRC of a number of variables and functions.
 */
#include <linux/io_uring.h>
#endif

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

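/*
 * Link @node in directly after @pos, updating @list's tail pointer if
 * @pos was the last entry. Like the other list helpers below, this
 * assumes the caller serialises list mutation.
 */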
static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

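/*
 * Append @node at the tail. ->first is published with WRITE_ONCE()
 * because wq_list_empty() peeks at it locklessly via READ_ONCE().
 */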
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

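/*
 * Cut the chain of nodes after @prev, up to and including @last, out of
 * @list (@prev == NULL means the cut starts at the head). The detached
 * segment keeps its internal links and is terminated at @last.
 */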
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

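/*
 * Unlink a single @node; @prev must be the node preceding it, or NULL
 * if @node is the first entry.
 */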
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {			\
	(list)->first = NULL;				\
	(list)->last = NULL;				\
} while (0)

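/*
 * Typical traversal pattern, tracking the previous node so an entry can
 * be unlinked in place (a sketch; match() stands in for whatever
 * predicate the caller uses):
 *
 *	struct io_wq_work_node *node, *prev;
 *
 *	wq_list_for_each(node, prev, &list) {
 *		if (match(node)) {
 *			wq_list_del(&list, node, prev);
 *			break;
 *		}
 *	}
 *
 * Deleting without breaking out would leave @node's ->next cleared, so
 * restart the walk after each removal.
 */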
struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
};

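/*
 * Return the work item linked after @work, or NULL if it is the last
 * (or only) entry in its chain.
 */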
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

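/*
 * Hash state shared between rings: as io-wq.c uses it, ->map is a
 * bitmap of hash buckets with work currently in flight, and workers
 * sleep on ->wait until a busy bucket frees up.
 */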
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

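/* Drop a reference; the hash state is freed when the last one goes. */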
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

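/*
 * Lifetime, roughly: io_wq_create() sets up a workqueue limited to
 * @bounded workers for bound work; io_wq_exit_start() begins teardown,
 * and io_wq_put_and_exit() drops the final reference and waits for the
 * workers to exit.
 */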
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);

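/* True if @work was marked for serialised execution via io_wq_hash_work(). */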
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

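/*
 * True only for genuine io-wq worker threads: process context, with
 * PF_IO_WORKER set and a worker struct attached. The extra
 * ->pf_io_worker check matters because PF_IO_WORKER alone also covers
 * io threads that are not io-wq workers.
 */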
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->pf_io_worker;
}
#endif