// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

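/*
 * RCU callback used by ioc_destroy_icq() to free an icq once any
 * concurrent RCU-protected lookups have completed.
 */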
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

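/*
 * Destroy all icqs on a list that has already been spliced off a queue's
 * icq_list.  Entries already marked ICQ_DESTROYED are skipped; the RCU
 * read lock keeps the icqs from being freed underneath us while walking.
 */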
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

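/*
 * Allocate a new io_context and try to install it on @task.  Returns 0 if
 * @task ends up with an io_context (newly installed or pre-existing),
 * -ENOMEM if allocation fails, and -EBUSY if @task is an exiting task
 * other than %current and thus can't take one.
 */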
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

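/* Create the slab cache backing all io_context allocations at boot. */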
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);