// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>

#include "rnbd-clt.h"

MODULE_DESCRIPTION("RDMA Network Block Device Client");
MODULE_LICENSE("GPL");

static int rnbd_client_major;
static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);

/*
 * Maximum number of partitions an instance can have.
 * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
 */
#define RNBD_PART_BITS 6
static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
{
	return refcount_inc_not_zero(&sess->refcount);
}

static void free_sess(struct rnbd_clt_session *sess);

static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
{
	might_sleep();

	if (refcount_dec_and_test(&sess->refcount))
		free_sess(sess);
}

static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
{
	might_sleep();

	if (!refcount_dec_and_test(&dev->refcount))
		return;

	mutex_lock(&ida_lock);
	ida_simple_remove(&index_ida, dev->clt_device_id);
	mutex_unlock(&ida_lock);
	kfree(dev->hw_queues);
	kfree(dev->pathname);
	rnbd_clt_put_sess(dev->sess);
	mutex_destroy(&dev->lock);
	kfree(dev);
}

static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}

static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
				 const struct rnbd_msg_open_rsp *rsp)
{
	struct rnbd_clt_session *sess = dev->sess;

	if (!rsp->logical_block_size)
		return -EINVAL;

	dev->device_id = le32_to_cpu(rsp->device_id);
	dev->nsectors = le64_to_cpu(rsp->nsectors);
	dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
	dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
	dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
	dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
	dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
	dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
	dev->secure_discard = le16_to_cpu(rsp->secure_discard);
	dev->rotational = rsp->rotational;

	dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
	dev->max_segments = BMAX_SEGMENTS;

	return 0;
}

static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
				    size_t new_nsectors)
{
	rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
		      dev->nsectors, new_nsectors);
	dev->nsectors = new_nsectors;
	set_capacity(dev->gd, dev->nsectors);
	revalidate_disk_size(dev->gd, true);
	return 0;
}

static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
				struct rnbd_msg_open_rsp *rsp)
{
	int err = 0;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_UNMAPPED) {
		rnbd_clt_info(dev,
			      "Ignoring Open-Response message from server for unmapped device\n");
		err = -ENOENT;
		goto out;
	}
	if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
		u64 nsectors = le64_to_cpu(rsp->nsectors);

		/*
		 * If the device was remapped and the size changed in the
		 * meantime we need to revalidate it
		 */
		if (dev->nsectors != nsectors)
			rnbd_clt_change_capacity(dev, nsectors);
		rnbd_clt_info(dev, "Device online, device remapped successfully\n");
	}
	err = rnbd_clt_set_dev_attr(dev, rsp);
	if (err)
		goto out;
	dev->dev_state = DEV_STATE_MAPPED;

out:
	mutex_unlock(&dev->lock);

	return err;
}

int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
{
	int ret = 0;

	mutex_lock(&dev->lock);
	if (dev->dev_state != DEV_STATE_MAPPED) {
		pr_err("Failed to set new size of the device, device is not opened\n");
		ret = -ENOENT;
		goto out;
	}
	ret = rnbd_clt_change_capacity(dev, newsize);

out:
	mutex_unlock(&dev->lock);

	return ret;
}

static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
{
	if (WARN_ON(!q->hctx))
		return;

	/* We can come here from interrupt, thus async=true */
	blk_mq_run_hw_queue(q->hctx, true);
}

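/*
 * Special "delay" value for rnbd_clt_dev_kick_mq_queue(): instead of
 * rerunning the hardware queue after a fixed delay, park it on the
 * session requeue list so that the next rnbd_put_permit() caller
 * restarts it (see rnbd_clt_dev_add_to_requeue()).
 */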
enum {
	RNBD_DELAY_IFBUSY = -1,
};

/**
 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
 * @sess:	Session to find a queue for
 * @cpu:	Cpu to start the search from
 *
 * Description:
 *     Each CPU has a list of HW queues, which need to be rerun.  If a list
 *     is not empty - it is marked with a bit.  This function finds the first
 *     set bit in the bitmap and returns the corresponding CPU list.
 */
static struct rnbd_cpu_qlist *
rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
{
	int bit;

	/* Search from cpu to nr_cpu_ids */
	bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
	if (bit < nr_cpu_ids) {
		return per_cpu_ptr(sess->cpu_queues, bit);
	} else if (cpu != 0) {
		/* Search from 0 to cpu */
		bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
		if (bit < cpu)
			return per_cpu_ptr(sess->cpu_queues, bit);
	}

	return NULL;
}

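/* Next CPU index after @cpu, wrapping around to 0 at nr_cpu_ids. */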
static inline int nxt_cpu(int cpu)
{
	return (cpu + 1) % nr_cpu_ids;
}

/**
 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
 * @sess:	Session to rerun a queue on
 *
 * Description:
 *     Each CPU has its own list of HW queues, which should be rerun.
 *     The function finds such a list with HW queues, takes the list lock,
 *     picks up the first HW queue out of the list and requeues it.
 *
 * Return:
 *     True if the queue was requeued, false otherwise.
 *
 * Context:
 *     Does not matter.
 */
static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
{
	struct rnbd_queue *q = NULL;
	struct rnbd_cpu_qlist *cpu_q;
	unsigned long flags;
	int *cpup;

	/*
	 * To keep fairness and not to let other queues starve we always
	 * try to wake up someone else in round-robin manner. That of course
	 * increases latency but queues always have a chance to be executed.
	 */
	cpup = get_cpu_ptr(sess->cpu_rr);
	for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
	     cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
		if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
			continue;
		if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
			goto unlock;
		q = list_first_entry_or_null(&cpu_q->requeue_list,
					     typeof(*q), requeue_list);
		if (WARN_ON(!q))
			goto clear_bit;
		list_del_init(&q->requeue_list);
		clear_bit_unlock(0, &q->in_list);

		if (list_empty(&cpu_q->requeue_list)) {
			/* Clear bit if nothing is left */
clear_bit:
			clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
		}
unlock:
		spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);

		if (q)
			break;
	}

	/*
	 * Saves the CPU that is going to be requeued on the per-cpu var. Just
	 * incrementing it doesn't work because rnbd_get_cpu_qlist() will
	 * always return the first CPU with something on the queue list when
	 * the value stored on the var is greater than the last CPU with
	 * something on the list.
	 */
	if (cpu_q)
		*cpup = cpu_q->cpu;
	put_cpu_var(sess->cpu_rr);

	if (q)
		rnbd_clt_dev_requeue(q);

	return q;
}

/**
 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
 *				 session is idling (there are no requests
 *				 in-flight).
 * @sess:	Session to rerun the queues on
 *
 * Description:
 *     This function tries to rerun all stopped queues if there are no
 *     requests in-flight anymore.  This function tries to solve an obvious
 *     problem, when the number of tags is less than the number of queues
 *     (hctxs), which are stopped and put to sleep.  If the last permit,
 *     which has been just put, does not wake up all the queues that are
 *     left, IO requests hang forever.
 *
 *     That can happen when all permits, say N, have been exhausted from one
 *     CPU, and we have many block devices per session, say M.  Each block
 *     device has its own queue (hctx) for each CPU, so eventually we can put
 *     that number of queues (hctxs) to sleep: M x nr_cpu_ids.
 *     If the number of permits N < M x nr_cpu_ids we eventually get an IO
 *     hang.
 *
 *     To avoid this hang the last caller of rnbd_put_permit() (the one who
 *     observes sess->busy == 0) must wake up all remaining queues.
 *
 * Context:
 *     Does not matter.
 */
static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
{
	bool requeued;

	do {
		requeued = rnbd_rerun_if_needed(sess);
	} while (atomic_read(&sess->busy) == 0 && requeued);
}

static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
					   enum rtrs_clt_con_type con_type,
					   int wait)
{
	struct rtrs_permit *permit;

	permit = rtrs_clt_get_permit(sess->rtrs, con_type,
				     wait ? RTRS_PERMIT_WAIT :
				     RTRS_PERMIT_NOWAIT);
	if (likely(permit))
		/* We have a subtle rare case here, when all permits can be
		 * consumed before the busy counter is increased.  This is
		 * safe, because the loser will get NULL as a permit, observe
		 * 0 busy counter and immediately restart the queue itself.
		 */
		atomic_inc(&sess->busy);

	return permit;
}

static void rnbd_put_permit(struct rnbd_clt_session *sess,
			    struct rtrs_permit *permit)
{
	rtrs_clt_put_permit(sess->rtrs, permit);
	atomic_dec(&sess->busy);
	/* Paired with rnbd_clt_dev_add_to_requeue().  Decrement first
	 * and then check queue bits.
	 */
	smp_mb__after_atomic();
	rnbd_rerun_all_if_idle(sess);
}

static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
				   enum rtrs_clt_con_type con_type,
				   int wait)
{
	struct rnbd_iu *iu;
	struct rtrs_permit *permit;

	permit = rnbd_get_permit(sess, con_type,
				 wait ? RTRS_PERMIT_WAIT :
				 RTRS_PERMIT_NOWAIT);
	if (unlikely(!permit))
		return NULL;
	iu = rtrs_permit_to_pdu(permit);
	iu->permit = permit;
	/*
	 * 1st reference is dropped after finishing sending a "user" message,
	 * 2nd reference is dropped after confirmation with the response is
	 * returned.
	 * 1st and 2nd can happen in any order, so the rnbd_iu should be
	 * released (rtrs_permit returned to rtrs) only after both are
	 * finished.
	 */
	atomic_set(&iu->refcount, 2);
	init_waitqueue_head(&iu->comp.wait);
	iu->comp.errno = INT_MAX;

	return iu;
}

static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
	if (atomic_dec_and_test(&iu->refcount))
		rnbd_put_permit(sess, iu->permit);
}

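/*
 * Runs from the blk-mq completion path (rnbd_mq_ops.complete): return the
 * RTRS permit and finish the request with the status recorded in iu->errno.
 */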
static void rnbd_softirq_done_fn(struct request *rq)
{
	struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_iu *iu;

	iu = blk_mq_rq_to_pdu(rq);
	rnbd_put_permit(sess, iu->permit);
	blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
}

static void msg_io_conf(void *priv, int errno)
{
	struct rnbd_iu *iu = priv;
	struct rnbd_clt_dev *dev = iu->dev;
	struct request *rq = iu->rq;
	int rw = rq_data_dir(rq);

	iu->errno = errno;

	blk_mq_complete_request(rq);

	if (errno)
		rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
				 rw == READ ? "read" : "write", errno);
}

static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
{
	iu->comp.errno = errno;
	wake_up(&iu->comp.wait);
}

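/*
 * RTRS confirmation callback for user messages: record the error and defer
 * the rest to a workqueue, since the conf handlers (msg_open_conf() etc.)
 * drop references and may sleep.
 */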
static void msg_conf(void *priv, int errno)
{
	struct rnbd_iu *iu = priv;

	iu->errno = errno;
	schedule_work(&iu->work);
}

enum wait_type {
	NO_WAIT = 0,
	WAIT    = 1
};

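/*
 * Send a "user" (admin) message over RTRS.  The completion handler @conf is
 * run from a workqueue via msg_conf(); with WAIT the caller additionally
 * sleeps until wake_up_iu_comp() stores the final errno in iu->comp.errno.
 */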
static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
			struct rnbd_iu *iu, struct kvec *vec,
			size_t len, struct scatterlist *sg, unsigned int sg_len,
			void (*conf)(struct work_struct *work),
			int *errno, enum wait_type wait)
{
	int err;
	struct rtrs_clt_req_ops req_ops;

	INIT_WORK(&iu->work, conf);
	req_ops = (struct rtrs_clt_req_ops) {
		.priv = iu,
		.conf_fn = msg_conf,
	};
	err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
			       vec, 1, len, sg, sg_len);
	if (!err && wait) {
		wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
		*errno = iu->comp.errno;
	} else {
		*errno = 0;
	}

	return err;
}

static void msg_close_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_clt_dev *dev = iu->dev;

	wake_up_iu_comp(iu, iu->errno);
	rnbd_put_iu(dev->sess, iu);
	rnbd_clt_put_dev(dev);
}

static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_msg_close msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu)
		return -ENOMEM;

	iu->buf = NULL;
	iu->dev = dev;

	sg_mark_end(&iu->sglist[0]);

	msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
	msg.device_id = cpu_to_le32(device_id);

	WARN_ON(!rnbd_clt_get_dev(dev));
	err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
			   msg_close_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_dev(dev);
		rnbd_put_iu(sess, iu);
	} else {
		err = errno;
	}

	rnbd_put_iu(sess, iu);
	return err;
}

static void msg_open_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_msg_open_rsp *rsp = iu->buf;
	struct rnbd_clt_dev *dev = iu->dev;
	int errno = iu->errno;

	if (errno) {
		rnbd_clt_err(dev,
			     "Opening failed, server responded: %d\n",
			     errno);
	} else {
		errno = process_msg_open_rsp(dev, rsp);
		if (errno) {
			u32 device_id = le32_to_cpu(rsp->device_id);
			/*
			 * If the server thinks it's fine, but we fail to
			 * process the response, then be nice and send a
			 * close to the server.
			 */
			(void)send_msg_close(dev, device_id, NO_WAIT);
		}
	}
	kfree(rsp);
	wake_up_iu_comp(iu, errno);
	rnbd_put_iu(dev->sess, iu);
	rnbd_clt_put_dev(dev);
}

static void msg_sess_info_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
	struct rnbd_clt_session *sess = iu->sess;

	if (!iu->errno)
		sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);

	kfree(rsp);
	wake_up_iu_comp(iu, iu->errno);
	rnbd_put_iu(sess, iu);
	rnbd_clt_put_sess(sess);
}

static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_msg_open_rsp *rsp;
	struct rnbd_msg_open msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu) {
		kfree(rsp);
		return -ENOMEM;
	}

	iu->buf = rsp;
	iu->dev = dev;

	sg_init_one(iu->sglist, rsp, sizeof(*rsp));

	msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
	msg.access_mode = dev->access_mode;
	strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));

	WARN_ON(!rnbd_clt_get_dev(dev));
	err = send_usr_msg(sess->rtrs, READ, iu,
			   &vec, sizeof(*rsp), iu->sglist, 1,
			   msg_open_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_dev(dev);
		rnbd_put_iu(sess, iu);
		kfree(rsp);
	} else {
		err = errno;
	}

	rnbd_put_iu(sess, iu);
	return err;
}

static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
{
	struct rnbd_msg_sess_info_rsp *rsp;
	struct rnbd_msg_sess_info msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu) {
		kfree(rsp);
		return -ENOMEM;
	}

	iu->buf = rsp;
	iu->sess = sess;

	sg_init_one(iu->sglist, rsp, sizeof(*rsp));

	msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
	msg.ver = RNBD_PROTO_VER_MAJOR;

	if (!rnbd_clt_get_sess(sess)) {
		/*
		 * That can happen only in one case, when RTRS has
		 * re-established the connection and link_ev() is called, but
		 * session is almost dead, last reference on session is put
		 * and caller is waiting for RTRS to close everything.
		 */
		err = -ENODEV;
		goto put_iu;
	}
	err = send_usr_msg(sess->rtrs, READ, iu,
			   &vec, sizeof(*rsp), iu->sglist, 1,
			   msg_sess_info_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_sess(sess);
put_iu:
		rnbd_put_iu(sess, iu);
		kfree(rsp);
	} else {
		err = errno;
	}

	rnbd_put_iu(sess, iu);
	return err;
}

static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
{
	struct rnbd_clt_dev *dev;

	mutex_lock(&sess->lock);
	list_for_each_entry(dev, &sess->devs_list, list) {
		rnbd_clt_err(dev, "Device disconnected.\n");

		mutex_lock(&dev->lock);
		if (dev->dev_state == DEV_STATE_MAPPED)
			dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
		mutex_unlock(&dev->lock);
	}
	mutex_unlock(&sess->lock);
}

static void remap_devs(struct rnbd_clt_session *sess)
{
	struct rnbd_clt_dev *dev;
	struct rtrs_attrs attrs;
	int err;

	/*
	 * Careful here: we are called from RTRS link event directly,
	 * thus we can't send any RTRS request and wait for response
	 * or RTRS will not be able to complete request with failure
	 * if something goes wrong (failing of outstanding requests
	 * happens exactly from the context where we are blocking now).
	 *
	 * So to avoid deadlocks each usr message sent from here must
	 * be asynchronous.
	 */

	err = send_msg_sess_info(sess, NO_WAIT);
	if (err) {
		pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
		return;
	}

	err = rtrs_clt_query(sess->rtrs, &attrs);
	if (err) {
		pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
		return;
	}
	mutex_lock(&sess->lock);
	sess->max_io_size = attrs.max_io_size;

	list_for_each_entry(dev, &sess->devs_list, list) {
		bool skip;

		mutex_lock(&dev->lock);
		skip = (dev->dev_state == DEV_STATE_INIT);
		mutex_unlock(&dev->lock);
		if (skip)
			/*
			 * When device is establishing connection for the first
			 * time - do not remap, it will be closed soon.
			 */
			continue;

		rnbd_clt_info(dev, "session reconnected, remapping device\n");
		err = send_msg_open(dev, NO_WAIT);
		if (err) {
			rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
			break;
		}
	}
	mutex_unlock(&sess->lock);
}

static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
	struct rnbd_clt_session *sess = priv;

	switch (ev) {
	case RTRS_CLT_LINK_EV_DISCONNECTED:
		set_dev_states_to_disconnected(sess);
		break;
	case RTRS_CLT_LINK_EV_RECONNECTED:
		remap_devs(sess);
		break;
	default:
		pr_err("Unknown session event received (%d), session: %s\n",
		       ev, sess->sessname);
	}
}

static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
{
	unsigned int cpu;
	struct rnbd_cpu_qlist *cpu_q;

	for_each_possible_cpu(cpu) {
		cpu_q = per_cpu_ptr(cpu_queues, cpu);

		cpu_q->cpu = cpu;
		INIT_LIST_HEAD(&cpu_q->requeue_list);
		spin_lock_init(&cpu_q->requeue_lock);
	}
}

static void destroy_mq_tags(struct rnbd_clt_session *sess)
{
	if (sess->tag_set.tags)
		blk_mq_free_tag_set(&sess->tag_set);
}

static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
{
	sess->rtrs_ready = true;
	wake_up_all(&sess->rtrs_waitq);
}

static void close_rtrs(struct rnbd_clt_session *sess)
{
	might_sleep();

	if (!IS_ERR_OR_NULL(sess->rtrs)) {
		rtrs_clt_close(sess->rtrs);
		sess->rtrs = NULL;
		wake_up_rtrs_waiters(sess);
	}
}

static void free_sess(struct rnbd_clt_session *sess)
{
	WARN_ON(!list_empty(&sess->devs_list));

	might_sleep();

	close_rtrs(sess);
	destroy_mq_tags(sess);
	if (!list_empty(&sess->list)) {
		mutex_lock(&sess_lock);
		list_del(&sess->list);
		mutex_unlock(&sess_lock);
	}
	free_percpu(sess->cpu_queues);
	free_percpu(sess->cpu_rr);
	mutex_destroy(&sess->lock);
	kfree(sess);
}

static struct rnbd_clt_session *alloc_sess(const char *sessname)
{
	struct rnbd_clt_session *sess;
	int err, cpu;

	sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
	if (!sess)
		return ERR_PTR(-ENOMEM);
	strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
	atomic_set(&sess->busy, 0);
	mutex_init(&sess->lock);
	INIT_LIST_HEAD(&sess->devs_list);
	INIT_LIST_HEAD(&sess->list);
	bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
	init_waitqueue_head(&sess->rtrs_waitq);
	refcount_set(&sess->refcount, 1);

	sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
	if (!sess->cpu_queues) {
		err = -ENOMEM;
		goto err;
	}
	rnbd_init_cpu_qlists(sess->cpu_queues);

	/*
	 * This is a simple percpu variable which stores CPU indices, which
	 * are incremented on each access.  We need that for the sake of
	 * fairness to wake up queues in a round-robin manner.
	 */
	sess->cpu_rr = alloc_percpu(int);
	if (!sess->cpu_rr) {
		err = -ENOMEM;
		goto err;
	}
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(sess->cpu_rr, cpu) = cpu;

	return sess;

err:
	free_sess(sess);

	return ERR_PTR(err);
}

static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
{
	wait_event(sess->rtrs_waitq, sess->rtrs_ready);
	if (IS_ERR_OR_NULL(sess->rtrs))
		return -ECONNRESET;

	return 0;
}

static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
	__releases(&sess_lock)
	__acquires(&sess_lock)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
	if (IS_ERR_OR_NULL(sess->rtrs)) {
		finish_wait(&sess->rtrs_waitq, &wait);
		return;
	}
	mutex_unlock(&sess_lock);
	/* loop in caller, see __find_and_get_sess().
	 * You can't leave mutex locked and call schedule(), you will catch a
	 * deadlock with a caller of free_sess(), which has just put the last
	 * reference and is about to take the sess_lock in order to delete
	 * the session from the list.
	 */
	schedule();
	mutex_lock(&sess_lock);
}

static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
	__releases(&sess_lock)
	__acquires(&sess_lock)
{
	struct rnbd_clt_session *sess, *sn;
	int err;

again:
	list_for_each_entry_safe(sess, sn, &sess_list, list) {
		if (strcmp(sessname, sess->sessname))
			continue;

		if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
			/*
			 * No RTRS connection, session is dying.
			 */
			continue;

		if (rnbd_clt_get_sess(sess)) {
			/*
			 * Alive session is found, wait for RTRS connection.
			 */
			mutex_unlock(&sess_lock);
			err = wait_for_rtrs_connection(sess);
			if (err)
				rnbd_clt_put_sess(sess);
			mutex_lock(&sess_lock);

			if (err)
				/* Session is dying, repeat the loop */
				goto again;

			return sess;
		}
		/*
		 * Ref is 0, session is dying, wait for RTRS disconnect
		 * in order to avoid session names clashes.
		 */
		wait_for_rtrs_disconnection(sess);
		/*
		 * RTRS is disconnected and soon session will be freed,
		 * so repeat a loop.
		 */
		goto again;
	}

	return NULL;
}

static struct
rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
{
	struct rnbd_clt_session *sess = NULL;

	mutex_lock(&sess_lock);
	sess = __find_and_get_sess(sessname);
	if (!sess) {
		sess = alloc_sess(sessname);
		if (IS_ERR(sess)) {
			mutex_unlock(&sess_lock);
			return sess;
		}
		list_add(&sess->list, &sess_list);
		*first = true;
	} else
		*first = false;
	mutex_unlock(&sess_lock);

	return sess;
}

static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
	struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;

	if (dev->read_only && (mode & FMODE_WRITE))
		return -EPERM;

	if (dev->dev_state == DEV_STATE_UNMAPPED ||
	    !rnbd_clt_get_dev(dev))
		return -EIO;

	return 0;
}

static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
{
	struct rnbd_clt_dev *dev = gen->private_data;

	rnbd_clt_put_dev(dev);
}

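/*
 * Report a synthetic CHS geometry: with 4 heads and 16 sectors per track
 * every cylinder holds 64 sectors, hence the size >> 6 below.
 */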
static int rnbd_client_getgeo(struct block_device *block_device,
			      struct hd_geometry *geo)
{
	u64 size;
	struct rnbd_clt_dev *dev;

	dev = block_device->bd_disk->private_data;
	size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
	geo->cylinders = size >> 6;	/* size/64 */
	geo->heads = 4;
	geo->sectors = 16;
	geo->start = 0;

	return 0;
}

static const struct block_device_operations rnbd_client_ops = {
	.owner		= THIS_MODULE,
	.open		= rnbd_client_open,
	.release	= rnbd_client_release,
	.getgeo		= rnbd_client_getgeo
};

/* The amount of data that belongs to an I/O and the amount of data that
 * should be read or written to the disk (bi_size) can differ.
 *
 * E.g. When WRITE_SAME is used, only a small amount of data is
 * transferred that is then written repeatedly over a lot of sectors.
 *
 * Get the size of data to be transferred via RTRS by summing up the size
 * of the scatter-gather list entries.
 */
static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
{
	struct scatterlist *sg;
	size_t tsize = 0;
	int i;

	for_each_sg(sglist, sg, len, i)
		tsize += sg->length;
	return tsize;
}

static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
				    struct request *rq,
				    struct rnbd_iu *iu)
{
	struct rtrs_clt *rtrs = dev->sess->rtrs;
	struct rtrs_permit *permit = iu->permit;
	struct rnbd_msg_io msg;
	struct rtrs_clt_req_ops req_ops;
	unsigned int sg_cnt = 0;
	struct kvec vec;
	size_t size;
	int err;

	iu->rq = rq;
	iu->dev = dev;
	msg.sector = cpu_to_le64(blk_rq_pos(rq));
	msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
	msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
	msg.prio = cpu_to_le16(req_get_ioprio(rq));

	/*
	 * We only support discards with single segment for now.
	 * See queue limits.
	 */
	if (req_op(rq) != REQ_OP_DISCARD)
		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);

	if (sg_cnt == 0)
		/* Do not forget to mark the end */
		sg_mark_end(&iu->sglist[0]);

	msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
	msg.device_id = cpu_to_le32(dev->device_id);

	vec = (struct kvec) {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
	req_ops = (struct rtrs_clt_req_ops) {
		.priv = iu,
		.conf_fn = msg_io_conf,
	};
	err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
			       &vec, 1, size, iu->sglist, sg_cnt);
	if (unlikely(err)) {
		rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
				err);
		return err;
	}

	return 0;
}

/**
 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
 * @dev:	Device to be checked
 * @q:		Queue to be added to the requeue list if required
 *
 * Description:
 *     If session is busy, that means someone will requeue us when resources
 *     are freed.  If session is not doing anything - device is not added to
 *     the list and @false is returned.
 */
static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
					struct rnbd_queue *q)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_cpu_qlist *cpu_q;
	unsigned long flags;
	bool added = true;
	bool need_set;

	cpu_q = get_cpu_ptr(sess->cpu_queues);
	spin_lock_irqsave(&cpu_q->requeue_lock, flags);

	if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
		if (WARN_ON(!list_empty(&q->requeue_list)))
			goto unlock;

		need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
		if (need_set) {
			set_bit(cpu_q->cpu, sess->cpu_queues_bm);
			/* Paired with rnbd_put_permit().  Set a bit first
			 * and then observe the busy counter.
			 */
			smp_mb__before_atomic();
		}
		if (likely(atomic_read(&sess->busy))) {
			list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
		} else {
			/* Very unlikely, but possible: busy counter was
			 * observed as zero.  Drop all bits and return
			 * false to restart the queue by ourselves.
			 */
			if (need_set)
				clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
			clear_bit_unlock(0, &q->in_list);
			added = false;
		}
	}
unlock:
	spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
	put_cpu_ptr(sess->cpu_queues);

	return added;
}

static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
				       struct blk_mq_hw_ctx *hctx,
				       int delay)
{
	struct rnbd_queue *q = hctx->driver_data;

	if (delay != RNBD_DELAY_IFBUSY)
		blk_mq_delay_run_hw_queue(hctx, delay);
	else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
		/*
		 * If session is not busy we have to restart
		 * the queue ourselves.
		 */
		blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
}

static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
	int err;

	if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
		return BLK_STS_IOERR;

	iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
				     RTRS_PERMIT_NOWAIT);
	if (unlikely(!iu->permit)) {
		rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
		return BLK_STS_RESOURCE;
	}

	blk_mq_start_request(rq);
	err = rnbd_client_xfer_request(dev, rq, iu);
	if (likely(err == 0))
		return BLK_STS_OK;
	if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
		rnbd_put_permit(dev->sess, iu->permit);
		return BLK_STS_RESOURCE;
	}

	rnbd_put_permit(dev->sess, iu->permit);
	return BLK_STS_IOERR;
}

static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx, unsigned int numa_node)
{
	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);

	sg_init_table(iu->sglist, BMAX_SEGMENTS);
	return 0;
}

static struct blk_mq_ops rnbd_mq_ops = {
	.queue_rq	= rnbd_queue_rq,
	.init_request	= rnbd_init_request,
	.complete	= rnbd_softirq_done_fn,
};

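/*
 * One tag set per session: the tag-space depth mirrors the RTRS queue depth,
 * and BLK_MQ_F_TAG_QUEUE_SHARED lets every device mapped on this session
 * share the same tags.
 */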
static int setup_mq_tags(struct rnbd_clt_session *sess)
{
        struct blk_mq_tag_set *tag_set = &sess->tag_set;

        memset(tag_set, 0, sizeof(*tag_set));
        tag_set->ops            = &rnbd_mq_ops;
        tag_set->queue_depth    = sess->queue_depth;
        tag_set->numa_node      = NUMA_NO_NODE;
        tag_set->flags          = BLK_MQ_F_SHOULD_MERGE |
                                  BLK_MQ_F_TAG_QUEUE_SHARED;
        tag_set->cmd_size       = sizeof(struct rnbd_iu);
        tag_set->nr_hw_queues   = num_online_cpus();

        return blk_mq_alloc_tag_set(tag_set);
}

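/*
 * Find an existing session by name or create a new one. Only the first
 * caller (reported via @first by find_or_create_sess()) establishes the
 * RTRS connection, queries its attributes and sets up the shared blk-mq
 * tag set; subsequent callers get the already referenced session back.
 */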
static struct rnbd_clt_session *
find_and_get_or_create_sess(const char *sessname,
                            const struct rtrs_addr *paths,
                            size_t path_cnt, u16 port_nr)
{
        struct rnbd_clt_session *sess;
        struct rtrs_attrs attrs;
        int err;
        bool first;
        struct rtrs_clt_ops rtrs_ops;

        sess = find_or_create_sess(sessname, &first);
        if (sess == ERR_PTR(-ENOMEM))
                return ERR_PTR(-ENOMEM);
        else if (!first)
                return sess;

        rtrs_ops = (struct rtrs_clt_ops) {
                .priv = sess,
                .link_ev = rnbd_clt_link_ev,
        };
        /*
         * Nothing was found, establish the rtrs connection and proceed.
         */
        sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
                                   paths, path_cnt, port_nr,
                                   sizeof(struct rnbd_iu),
                                   RECONNECT_DELAY, BMAX_SEGMENTS,
                                   BLK_MAX_SEGMENT_SIZE,
                                   MAX_RECONNECTS);
        if (IS_ERR(sess->rtrs)) {
                err = PTR_ERR(sess->rtrs);
                goto wake_up_and_put;
        }

        err = rtrs_clt_query(sess->rtrs, &attrs);
        if (err)
                goto close_rtrs;

        sess->max_io_size = attrs.max_io_size;
        sess->queue_depth = attrs.queue_depth;

        err = setup_mq_tags(sess);
        if (err)
                goto close_rtrs;

        err = send_msg_sess_info(sess, WAIT);
        if (err)
                goto close_rtrs;

        wake_up_rtrs_waiters(sess);

        return sess;

close_rtrs:
        close_rtrs(sess);
put_sess:
        rnbd_clt_put_sess(sess);

        return ERR_PTR(err);

wake_up_and_put:
        wake_up_rtrs_waiters(sess);
        goto put_sess;
}

static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
                                      struct rnbd_queue *q,
                                      struct blk_mq_hw_ctx *hctx)
{
        INIT_LIST_HEAD(&q->requeue_list);
        q->dev = dev;
        q->hctx = hctx;
}

static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
        int i;
        struct blk_mq_hw_ctx *hctx;
        struct rnbd_queue *q;

        queue_for_each_hw_ctx(dev->queue, hctx, i) {
                q = &dev->hw_queues[i];
                rnbd_init_hw_queue(dev, q, hctx);
                hctx->driver_data = q;
        }
}

static int setup_mq_dev(struct rnbd_clt_dev *dev)
{
        dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
        if (IS_ERR(dev->queue)) {
                rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
                             PTR_ERR(dev->queue));
                return PTR_ERR(dev->queue);
        }
        rnbd_init_mq_hw_queues(dev);
        return 0;
}

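/*
 * Apply the block-layer limits for this device. The values below were
 * reported by the server in its open response (see rnbd_clt_set_dev_attr()),
 * except for the optimal IO size, which is the session-wide max_io_size.
 */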
static void setup_request_queue(struct rnbd_clt_dev *dev)
{
        blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
        blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
        blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
        blk_queue_max_write_same_sectors(dev->queue,
                                         dev->max_write_same_sectors);

        /*
         * We don't support discards to "discontiguous" segments
         * in one request.
         */
        blk_queue_max_discard_segments(dev->queue, 1);

        blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
        dev->queue->limits.discard_granularity = dev->discard_granularity;
        dev->queue->limits.discard_alignment = dev->discard_alignment;
        if (dev->max_discard_sectors)
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
        if (dev->secure_discard)
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);

        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
        blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
        blk_queue_max_segments(dev->queue, dev->max_segments);
        blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
        blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
        blk_queue_write_cache(dev->queue, true, true);
        dev->queue->queuedata = dev;
}

static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
        dev->gd->major          = rnbd_client_major;
        dev->gd->first_minor    = idx << RNBD_PART_BITS;
        dev->gd->fops           = &rnbd_client_ops;
        dev->gd->queue          = dev->queue;
        dev->gd->private_data   = dev;
        snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
                 idx);
        pr_debug("disk_name=%s, capacity=%zu\n",
                 dev->gd->disk_name,
                 dev->nsectors * (dev->logical_block_size / SECTOR_SIZE));

        set_capacity(dev->gd, dev->nsectors);

        if (dev->access_mode == RNBD_ACCESS_RO) {
                dev->read_only = true;
                set_disk_ro(dev->gd, true);
        } else {
                dev->read_only = false;
        }

        if (!dev->rotational)
                blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
}

static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
                                    struct rnbd_clt_dev *dev, int idx)
{
        int err;

        dev->size = dev->nsectors * dev->logical_block_size;

        err = setup_mq_dev(dev);
        if (err)
                return err;

        setup_request_queue(dev);

        dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
        if (!dev->gd) {
                blk_cleanup_queue(dev->queue);
                return -ENOMEM;
        }

        rnbd_clt_setup_gen_disk(dev, idx);

        return 0;
}

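/*
 * Allocate and initialize a client device: per-CPU hardware queues, a
 * device id from index_ida (which becomes the base of the minor number
 * range), a copy of the pathname and an initial reference. The matching
 * cleanup runs in rnbd_clt_put_dev() when the last reference is dropped.
 */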
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
                                     enum rnbd_access_mode access_mode,
                                     const char *pathname)
{
        struct rnbd_clt_dev *dev;
        int ret;

        dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues),
                                 GFP_KERNEL);
        if (!dev->hw_queues) {
                ret = -ENOMEM;
                goto out_alloc;
        }

        mutex_lock(&ida_lock);
        ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
                             GFP_KERNEL);
        mutex_unlock(&ida_lock);
        if (ret < 0) {
                pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
                       pathname, sess->sessname, ret);
                goto out_queues;
        }
        dev->clt_device_id = ret;

        dev->pathname = kstrdup(pathname, GFP_KERNEL);
        if (!dev->pathname) {
                ret = -ENOMEM;
                goto out_ida;
        }

        dev->sess = sess;
        dev->access_mode = access_mode;
        mutex_init(&dev->lock);
        refcount_set(&dev->refcount, 1);
        dev->dev_state = DEV_STATE_INIT;

        /*
         * We are called from a sysfs entry here, thus clt-sysfs is
         * responsible for making sure the session does not disappear.
         */
        WARN_ON(!rnbd_clt_get_sess(sess));

        return dev;

out_ida:
        /* Don't leak the device id if kstrdup() failed */
        mutex_lock(&ida_lock);
        ida_simple_remove(&index_ida, dev->clt_device_id);
        mutex_unlock(&ida_lock);
out_queues:
        kfree(dev->hw_queues);
out_alloc:
        kfree(dev);
        return ERR_PTR(ret);
}

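/* Caller must hold sess_lock to keep the session list stable */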
static bool __exists_dev(const char *pathname)
{
        struct rnbd_clt_session *sess;
        struct rnbd_clt_dev *dev;
        bool found = false;

        list_for_each_entry(sess, &sess_list, list) {
                mutex_lock(&sess->lock);
                list_for_each_entry(dev, &sess->devs_list, list) {
                        if (strlen(dev->pathname) == strlen(pathname) &&
                            !strcmp(dev->pathname, pathname)) {
                                found = true;
                                break;
                        }
                }
                mutex_unlock(&sess->lock);
                if (found)
                        break;
        }

        return found;
}

static bool exists_devpath(const char *pathname)
{
        bool found;

        mutex_lock(&sess_lock);
        found = __exists_dev(pathname);
        mutex_unlock(&sess_lock);

        return found;
}

static bool insert_dev_if_not_exists_devpath(const char *pathname,
                                             struct rnbd_clt_session *sess,
                                             struct rnbd_clt_dev *dev)
{
        bool found;

        mutex_lock(&sess_lock);
        found = __exists_dev(pathname);
        if (!found) {
                mutex_lock(&sess->lock);
                list_add_tail(&dev->list, &sess->devs_list);
                mutex_unlock(&sess->lock);
        }
        mutex_unlock(&sess_lock);

        return found;
}

static void delete_dev(struct rnbd_clt_dev *dev)
{
        struct rnbd_clt_session *sess = dev->sess;

        mutex_lock(&sess->lock);
        list_del(&dev->list);
        mutex_unlock(&sess->lock);
}

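/*
 * Map a remote device over an (existing or new) session and register it
 * as a local block device. On failure, the completed steps are unwound in
 * reverse order via the labels at the bottom. A rough sketch of a caller
 * (the real sysfs parsing lives in rnbd-clt-sysfs.c):
 *
 *	dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr,
 *				  pathname, access_mode);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */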
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
                                         struct rtrs_addr *paths,
                                         size_t path_cnt, u16 port_nr,
                                         const char *pathname,
                                         enum rnbd_access_mode access_mode)
{
        struct rnbd_clt_session *sess;
        struct rnbd_clt_dev *dev;
        int ret;

        if (exists_devpath(pathname))
                return ERR_PTR(-EEXIST);

        sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
        if (IS_ERR(sess))
                return ERR_CAST(sess);

        dev = init_dev(sess, access_mode, pathname);
        if (IS_ERR(dev)) {
                pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
                       pathname, sess->sessname, PTR_ERR(dev));
                ret = PTR_ERR(dev);
                goto put_sess;
        }
        if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
                ret = -EEXIST;
                goto put_dev;
        }
        ret = send_msg_open(dev, WAIT);
        if (ret) {
                rnbd_clt_err(dev,
                             "map_device: failed, can't open remote device, err: %d\n",
                             ret);
                goto del_dev;
        }
        mutex_lock(&dev->lock);
        pr_debug("Opened remote device: session=%s, path='%s'\n",
                 sess->sessname, pathname);
        ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
        if (ret) {
                rnbd_clt_err(dev,
                             "map_device: Failed to configure device, err: %d\n",
                             ret);
                mutex_unlock(&dev->lock);
                goto send_close;
        }

        rnbd_clt_info(dev,
                      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
                      dev->gd->disk_name, dev->nsectors,
                      dev->logical_block_size, dev->physical_block_size,
                      dev->max_write_same_sectors, dev->max_discard_sectors,
                      dev->discard_granularity, dev->discard_alignment,
                      dev->secure_discard, dev->max_segments,
                      dev->max_hw_sectors, dev->rotational);

        mutex_unlock(&dev->lock);

        add_disk(dev->gd);
        rnbd_clt_put_sess(sess);

        return dev;

send_close:
        send_msg_close(dev, dev->device_id, WAIT);
del_dev:
        delete_dev(dev);
put_dev:
        rnbd_clt_put_dev(dev);
put_sess:
        rnbd_clt_put_sess(sess);

        return ERR_PTR(ret);
}

static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
        del_gendisk(dev->gd);
        blk_cleanup_queue(dev->queue);
        put_disk(dev->gd);
}

static void destroy_sysfs(struct rnbd_clt_dev *dev,
                          const struct attribute *sysfs_self)
{
        rnbd_clt_remove_dev_symlink(dev);
        if (dev->kobj.state_initialized) {
                if (sysfs_self)
                        /* To avoid a deadlock, first remove the file itself */
                        sysfs_remove_file_self(&dev->kobj, sysfs_self);
                kobject_del(&dev->kobj);
                kobject_put(&dev->kobj);
        }
}

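/*
 * Unmap a device and tear down its sysfs and block-layer state. Unless
 * @force is set, unmapping is refused with -EBUSY while anybody else
 * still holds a reference (e.g. an opener of the block device). If the
 * device was mapped and the session is still connected, the server is
 * additionally asked to close it.
 */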
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
                          const struct attribute *sysfs_self)
{
        struct rnbd_clt_session *sess = dev->sess;
        int refcount, ret = 0;
        bool was_mapped;

        mutex_lock(&dev->lock);
        if (dev->dev_state == DEV_STATE_UNMAPPED) {
                rnbd_clt_info(dev, "Device is already being unmapped\n");
                ret = -EALREADY;
                goto err;
        }
        refcount = refcount_read(&dev->refcount);
        if (!force && refcount > 1) {
                rnbd_clt_err(dev,
                             "Closing device failed, device is in use, (%d device users)\n",
                             refcount - 1);
                ret = -EBUSY;
                goto err;
        }
        was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
        dev->dev_state = DEV_STATE_UNMAPPED;
        mutex_unlock(&dev->lock);

        delete_dev(dev);
        destroy_sysfs(dev, sysfs_self);
        destroy_gen_disk(dev);
        if (was_mapped && sess->rtrs)
                send_msg_close(dev, dev->device_id, WAIT);

        rnbd_clt_info(dev, "Device is unmapped\n");

        /* Likely the last reference put */
        rnbd_clt_put_dev(dev);

        /*
         * From this point on the device and the session may already be gone.
         */

        return 0;
err:
        mutex_unlock(&dev->lock);

        return ret;
}

int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
{
        int err;

        mutex_lock(&dev->lock);
        if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
                err = 0;
        else if (dev->dev_state == DEV_STATE_UNMAPPED)
                err = -ENODEV;
        else if (dev->dev_state == DEV_STATE_MAPPED)
                err = -EALREADY;
        else
                err = -EBUSY;
        mutex_unlock(&dev->lock);
        if (!err) {
                rnbd_clt_info(dev, "Remapping device.\n");
                err = send_msg_open(dev, WAIT);
                if (err)
                        rnbd_clt_err(dev, "remap_device: %d\n", err);
        }

        return err;
}

static void unmap_device_work(struct work_struct *work)
{
        struct rnbd_clt_dev *dev;

        dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
        rnbd_clt_unmap_device(dev, true, NULL);
}

static void rnbd_destroy_sessions(void)
{
        struct rnbd_clt_session *sess, *sn;
        struct rnbd_clt_dev *dev, *tn;

        /* First forbid access through the sysfs interface */
        rnbd_clt_destroy_default_group();
        rnbd_clt_destroy_sysfs_files();

        /*
         * At this point there is no concurrent access to the session and
         * device lists:
         * 1. New sessions or devices can't be created - the session sysfs
         *    files were removed above.
         * 2. Devices or sessions can't be removed - the module reference
         *    is taken into account in the unmap device sysfs callback.
         * 3. No IO requests are in flight - each file open of the
         *    block_dev increases the module reference in get_disk().
         *
         * But there can still be user requests in flight, sent by the
         * asynchronous send_msg_*() functions, thus the RTRS session must
         * be explicitly closed before the devices are unmapped.
         */

        list_for_each_entry_safe(sess, sn, &sess_list, list) {
                if (!rnbd_clt_get_sess(sess))
                        continue;
                close_rtrs(sess);
                list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
                        /*
                         * Unmap happens in parallel for only one reason:
                         * blk_cleanup_queue() takes around half a second,
                         * so on a huge number of devices the whole module
                         * unload procedure would take minutes.
                         */
                        INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
                        queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
                }
                rnbd_clt_put_sess(sess);
        }
        /* Wait for all scheduled unmap works */
        flush_workqueue(system_long_wq);
        WARN_ON(!list_empty(&sess_list));
}

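/*
 * The BUILD_BUG_ON()s below pin the on-the-wire sizes of the rnbd
 * protocol messages: changing any of these structures would break
 * compatibility with existing servers, so make the build fail loudly
 * instead.
 */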
static int __init rnbd_client_init(void)
{
        int err = 0;

        BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
        BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
        BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
        BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
        BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
        BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
        rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
        if (rnbd_client_major <= 0) {
                pr_err("Failed to load module, block device registration failed\n");
                return -EBUSY;
        }

        err = rnbd_clt_create_sysfs_files();
        if (err) {
                pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
                       err);
                unregister_blkdev(rnbd_client_major, "rnbd");
        }

        return err;
}

static void __exit rnbd_client_exit(void)
{
        rnbd_destroy_sessions();
        unregister_blkdev(rnbd_client_major, "rnbd");
        ida_destroy(&index_ida);
}

module_init(rnbd_client_init);
module_exit(rnbd_client_exit);