// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/random.h>

#include "rtrs-clt.h"
#include "rtrs-log.h"

#define RTRS_CONNECT_TIMEOUT_MS 30000
/*
 * Wait a bit before trying to reconnect after a failure
 * in order to give the server time to finish clean up,
 * which otherwise leads to "false positive" failed reconnect attempts
 */
#define RTRS_RECONNECT_BACKOFF 1000
/*
 * Wait for additional random time between 0 and 8 seconds
 * before starting to reconnect to avoid clients reconnecting
 * all at once in case of a major network outage
 */
#define RTRS_RECONNECT_SEED 8

#define FIRST_CONN 0x01

MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");

static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
static struct rtrs_rdma_dev_pd dev_pd = {
	.ops = &dev_pd_ops
};

static struct workqueue_struct *rtrs_wq;
static struct class *rtrs_clt_dev_class;
static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
{
	struct rtrs_clt_sess *sess;
	bool connected = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
		connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
	rcu_read_unlock();

	return connected;
}

static struct rtrs_permit *
__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
{
	size_t max_depth = clt->queue_depth;
	struct rtrs_permit *permit;
	int bit;

	/*
	 * Adapted from null_blk get_tag(). Callers from different cpus may
	 * grab the same bit, since find_first_zero_bit is not atomic.
	 * But then the test_and_set_bit_lock will fail for all the
	 * callers but one, so that they will loop again.
	 * This way an explicit spinlock is not required.
	 */
	do {
		bit = find_first_zero_bit(clt->permits_map, max_depth);
		if (unlikely(bit >= max_depth))
			return NULL;
	} while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));

	permit = get_permit(clt, bit);
	WARN_ON(permit->mem_id != bit);
	permit->cpu_id = raw_smp_processor_id();
	permit->con_type = con_type;

	return permit;
}

static inline void __rtrs_put_permit(struct rtrs_clt *clt,
				     struct rtrs_permit *permit)
{
	clear_bit_unlock(permit->mem_id, clt->permits_map);
}
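
/*
 * Illustrative sketch (not driver code) of the lock-free pattern used in
 * __rtrs_get_permit(): find_first_zero_bit() is only a hint, while
 * test_and_set_bit_lock() is the real atomic claim, so two racing CPUs
 * may pick the same bit, but only one wins and the loser simply rescans:
 *
 *	int alloc_bit(unsigned long *map, unsigned int depth)
 *	{
 *		unsigned int bit;
 *
 *		do {
 *			bit = find_first_zero_bit(map, depth);
 *			if (bit >= depth)
 *				return -1;	// map exhausted
 *		} while (test_and_set_bit_lock(bit, map));
 *
 *		return bit;
 *	}
 *
 * Freeing is the mirror image: clear_bit_unlock(bit, map), whose release
 * semantics pair with the acquire in test_and_set_bit_lock(), exactly as
 * __rtrs_put_permit() does above.
 */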

/**
 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
 * @clt: Current session
 * @con_type: Type of connection to use with the permit
 * @can_wait: Wait type
 *
 * Description:
 * Allocates a permit for the following RDMA operation. A permit is used
 * to preallocate all resources and to propagate memory pressure
 * up earlier.
 *
 * Context:
 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
 */
struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
					enum rtrs_clt_con_type con_type,
					int can_wait)
{
	struct rtrs_permit *permit;
	DEFINE_WAIT(wait);

	permit = __rtrs_get_permit(clt, con_type);
	if (likely(permit) || !can_wait)
		return permit;

	do {
		prepare_to_wait(&clt->permits_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		permit = __rtrs_get_permit(clt, con_type);
		if (likely(permit))
			break;

		io_schedule();
	} while (1);

	finish_wait(&clt->permits_wait, &wait);

	return permit;
}
EXPORT_SYMBOL(rtrs_clt_get_permit);

/**
 * rtrs_clt_put_permit() - puts allocated permit
 * @clt: Current session
 * @permit: Permit to be freed
 *
 * Context:
 * Does not matter
 */
void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
{
	if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
		return;

	__rtrs_put_permit(clt, permit);

	/*
	 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
	 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
	 * it must have added itself to &clt->permits_wait before
	 * __rtrs_put_permit() finished.
	 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
	 */
	if (waitqueue_active(&clt->permits_wait))
		wake_up(&clt->permits_wait);
}
EXPORT_SYMBOL(rtrs_clt_put_permit);
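
/*
 * A minimal sketch (illustrative only) of the wait/wake pairing used by
 * rtrs_clt_get_permit()/rtrs_clt_put_permit():
 *
 *	waiter				waker
 *	------				-----
 *	prepare_to_wait(&wq, &wait,	clear_bit_unlock(...);
 *		TASK_UNINTERRUPTIBLE);	if (waitqueue_active(&wq))
 *	permit = __rtrs_get_permit();		wake_up(&wq);
 *	if (!permit)
 *		io_schedule();
 *	finish_wait(&wq, &wait);
 *
 * prepare_to_wait() enqueues the waiter before the final permit check,
 * which is exactly why the unlocked waitqueue_active() test above cannot
 * miss a sleeping waiter.
 */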

void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
{
	return permit + 1;
}
EXPORT_SYMBOL(rtrs_permit_to_pdu);
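
/*
 * Sketch of the "trailing payload" idiom behind rtrs_permit_to_pdu()
 * (illustrative only; the variable names below are hypothetical).  When
 * the session is created each permit slot is sized to hold the user PDU
 * right behind the permit itself, so "permit + 1" is the first byte of
 * the PDU:
 *
 *	slot = kmalloc(sizeof(struct rtrs_permit) + pdu_sz, GFP_KERNEL);
 *	permit = slot;
 *	pdu = permit + 1;	// == (char *)slot + sizeof(*permit)
 */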

/**
 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
 * @sess: client session pointer
 * @permit: permit for the allocation of the RDMA buffer
 * Note:
 * IO connections start from 1.
 * Connection 0 is for user messages.
 */
static
struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
					    struct rtrs_permit *permit)
{
	int id = 0;

	if (likely(permit->con_type == RTRS_IO_CON))
		id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;

	return to_clt_con(sess->s.con[id]);
}
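
/*
 * Worked example (hypothetical numbers): with sess->s.con_num == 5 there
 * is one service connection (index 0) and four IO connections (1..4); a
 * permit taken on cpu_id == 7 maps to id = 7 % (5 - 1) + 1 = 4, so IO
 * issued from the same CPU always lands on the same IO connection.
 */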

/**
 * __rtrs_clt_change_state() - change the session state through session state
 * machine.
 *
 * @sess: client session to change the state of.
 * @new_state: state to change to.
 *
 * returns true if successful, false if the requested state can not be set.
 *
 * Locks:
 * state_wq lock must be held.
 */
static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
				    enum rtrs_clt_state new_state)
{
	enum rtrs_clt_state old_state;
	bool changed = false;

	lockdep_assert_held(&sess->state_wq.lock);

	old_state = sess->state;
	switch (new_state) {
	case RTRS_CLT_CONNECTING:
		switch (old_state) {
		case RTRS_CLT_RECONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_RECONNECTING:
		switch (old_state) {
		case RTRS_CLT_CONNECTED:
		case RTRS_CLT_CONNECTING_ERR:
		case RTRS_CLT_CLOSED:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_CONNECTED:
		switch (old_state) {
		case RTRS_CLT_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_CONNECTING_ERR:
		switch (old_state) {
		case RTRS_CLT_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_CLOSING:
		switch (old_state) {
		case RTRS_CLT_CONNECTING:
		case RTRS_CLT_CONNECTING_ERR:
		case RTRS_CLT_RECONNECTING:
		case RTRS_CLT_CONNECTED:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_CLOSED:
		switch (old_state) {
		case RTRS_CLT_CLOSING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case RTRS_CLT_DEAD:
		switch (old_state) {
		case RTRS_CLT_CLOSED:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}
	if (changed) {
		sess->state = new_state;
		wake_up_locked(&sess->state_wq);
	}

	return changed;
}

static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
					  enum rtrs_clt_state old_state,
					  enum rtrs_clt_state new_state)
{
	bool changed = false;

	spin_lock_irq(&sess->state_wq.lock);
	if (sess->state == old_state)
		changed = __rtrs_clt_change_state(sess, new_state);
	spin_unlock_irq(&sess->state_wq.lock);

	return changed;
}

static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	if (rtrs_clt_change_state_from_to(sess,
					  RTRS_CLT_CONNECTED,
					  RTRS_CLT_RECONNECTING)) {
		struct rtrs_clt *clt = sess->clt;
		unsigned int delay_ms;

		/*
		 * Normal scenario, reconnect if we were successfully connected
		 */
		delay_ms = clt->reconnect_delay_sec * 1000;
		queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
				   msecs_to_jiffies(delay_ms +
						    prandom_u32() % RTRS_RECONNECT_SEED));
	} else {
		/*
		 * An error can also happen while establishing a new
		 * connection, so notify the waiter with the error state;
		 * the waiter is responsible for cleaning up the rest and
		 * reconnecting if needed.
		 */
		rtrs_clt_change_state_from_to(sess,
					      RTRS_CLT_CONNECTING,
					      RTRS_CLT_CONNECTING_ERR);
	}
}
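
/*
 * Worked example (hypothetical numbers): with reconnect_delay_sec == 5
 * the reconnect work above is queued after msecs_to_jiffies(5000 + r),
 * where r = prandom_u32() % RTRS_RECONNECT_SEED is 0..7.  Note that the
 * jitter is added to a millisecond value here, so the effective spread
 * is a few milliseconds, not the seconds described next to
 * RTRS_RECONNECT_SEED.
 */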

static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_con *con = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
			 ib_wc_status_msg(wc->status));
		rtrs_rdma_error_recovery(con);
	}
}

static struct ib_cqe fast_reg_cqe = {
	.done = rtrs_clt_fast_reg_done
};

static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
			      bool notify, bool can_wait);

static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_io_req *req =
		container_of(wc->wr_cqe, typeof(*req), inv_cqe);
	struct rtrs_clt_con *con = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
			 ib_wc_status_msg(wc->status));
		rtrs_rdma_error_recovery(con);
	}
	req->need_inv = false;
	if (likely(req->need_inv_comp))
		complete(&req->inv_comp);
	else
		/* Complete request from INV callback */
		complete_rdma_req(req, req->inv_errno, true, false);
}

static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_cqe		    = &req->inv_cqe,
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,
	};
	req->inv_cqe.done = rtrs_clt_inv_rkey_done;

	return ib_post_send(con->c.qp, &wr, NULL);
}

static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
			      bool notify, bool can_wait)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess;
	int err;

	if (WARN_ON(!req->in_use))
		return;
	if (WARN_ON(!req->con))
		return;
	sess = to_clt_sess(con->c.sess);

	if (req->sg_cnt) {
		if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
			/*
			 * We are here to invalidate read requests
			 * ourselves. In the normal scenario the server
			 * sends an INV for all read requests; since we
			 * got here instead, one of two things happened:
			 *
			 * 1. this is failover, when errno != 0
			 *    and can_wait == 1,
			 *
			 * 2. something totally bad happened and
			 *    the server forgot to send INV, so we
			 *    should do that ourselves.
			 */

			if (likely(can_wait)) {
				req->need_inv_comp = true;
			} else {
				/* This should be IO path, so always notify */
				WARN_ON(!notify);
				/* Save errno for INV callback */
				req->inv_errno = errno;
			}

			err = rtrs_inv_rkey(req);
			if (unlikely(err)) {
				rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
					 req->mr->rkey, err);
			} else if (likely(can_wait)) {
				wait_for_completion(&req->inv_comp);
			} else {
				/*
				 * Something went wrong, so request will be
				 * completed from INV callback.
				 */
				WARN_ON_ONCE(1);

				return;
			}
		}
		ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
				req->sg_cnt, req->dir);
	}
	if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_dec(&sess->stats->inflight);

	req->in_use = false;
	req->con = NULL;

	if (notify)
		req->conf(req->priv, errno);
}

static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
			       struct rtrs_clt_io_req *req,
			       struct rtrs_rbuf *rbuf, u32 off,
			       u32 imm, struct ib_send_wr *wr)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	enum ib_send_flags flags;
	struct ib_sge sge;

	if (unlikely(!req->sg_size)) {
		rtrs_wrn(con->c.sess,
			 "Doing RDMA Write failed, no data supplied\n");
		return -EINVAL;
	}

	/* user data and user message in the first list element */
	sge.addr   = req->iu->dma_addr;
	sge.length = req->sg_size;
	sge.lkey   = sess->s.dev->ib_pd->local_dma_lkey;

	/*
	 * From time to time we have to post signalled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
	flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
			0 : IB_SEND_SIGNALED;

	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
				      req->sg_size, DMA_TO_DEVICE);

	return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
					   rbuf->rkey, rbuf->addr + off,
					   imm, flags, wr);
}

static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
			   s16 errno, bool w_inval)
{
	struct rtrs_clt_io_req *req;

	if (WARN_ON(msg_id >= sess->queue_depth))
		return;

	req = &sess->reqs[msg_id];
	/* Drop need_inv if server responded with send with invalidation */
	req->need_inv &= !w_inval;
	complete_rdma_req(req, errno, true, false);
}

static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
	struct rtrs_iu *iu;
	int err;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
	iu = container_of(wc->wr_cqe, struct rtrs_iu,
			  cqe);
	err = rtrs_iu_post_recv(&con->c, iu);
	if (unlikely(err)) {
		rtrs_err(con->c.sess, "post iu failed %d\n", err);
		rtrs_rdma_error_recovery(con);
	}
}

static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_msg_rkey_rsp *msg;
	u32 imm_type, imm_payload;
	bool w_inval = false;
	struct rtrs_iu *iu;
	u32 buf_id;
	int err;

	WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);

	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);

	if (unlikely(wc->byte_len < sizeof(*msg))) {
		rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
			 wc->byte_len);
		goto out;
	}
	ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
				   iu->size, DMA_FROM_DEVICE);
	msg = iu->buf;
	if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
		rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
			 le16_to_cpu(msg->type));
		goto out;
	}
	buf_id = le16_to_cpu(msg->buf_id);
	if (WARN_ON(buf_id >= sess->queue_depth))
		goto out;

	rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
	if (likely(imm_type == RTRS_IO_RSP_IMM ||
		   imm_type == RTRS_IO_RSP_W_INV_IMM)) {
		u32 msg_id;

		w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
		rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);

		if (WARN_ON(buf_id != msg_id))
			goto out;
		sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
		process_io_rsp(sess, msg_id, err, w_inval);
	}
	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
				      iu->size, DMA_FROM_DEVICE);
	return rtrs_clt_recv_done(con, wc);
out:
	rtrs_rdma_error_recovery(con);
}

static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);

static struct ib_cqe io_comp_cqe = {
	.done = rtrs_clt_rdma_done
};

/*
 * Post x2 empty WRs: first is for this RDMA with IMM,
 * second is for RECV with INV, which happened earlier.
 */
static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
{
	struct ib_recv_wr wr_arr[2], *wr;
	int i;

	memset(wr_arr, 0, sizeof(wr_arr));
	for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
		wr = &wr_arr[i];
		wr->wr_cqe = cqe;
		if (i)
			/* Chain backwards */
			wr->next = &wr_arr[i - 1];
	}

	return ib_post_recv(con->qp, wr, NULL);
}
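
/*
 * Sketch of the chain built above (both WRs share the same CQE): the
 * loop leaves wr pointing at wr_arr[1], so the list handed to a single
 * ib_post_recv() call is:
 *
 *	wr_arr[1] -> wr_arr[0] -> NULL
 */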

static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_con *con = cq->cq_context;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	u32 imm_type, imm_payload;
	bool w_inval = false;
	int err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			rtrs_err(sess->clt, "RDMA failed: %s\n",
				 ib_wc_status_msg(wc->status));
			rtrs_rdma_error_recovery(con);
		}
		return;
	}
	rtrs_clt_update_wc_stats(con);

	switch (wc->opcode) {
	case IB_WC_RECV_RDMA_WITH_IMM:
		/*
		 * post_recv() RDMA write completions of IO reqs (read/write)
		 * and hb
		 */
		if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
			return;
		rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
			      &imm_type, &imm_payload);
		if (likely(imm_type == RTRS_IO_RSP_IMM ||
			   imm_type == RTRS_IO_RSP_W_INV_IMM)) {
			u32 msg_id;

			w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
			rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);

			process_io_rsp(sess, msg_id, err, w_inval);
		} else if (imm_type == RTRS_HB_MSG_IMM) {
			WARN_ON(con->c.cid);
			rtrs_send_hb_ack(&sess->s);
			if (sess->flags & RTRS_MSG_NEW_RKEY_F)
				return rtrs_clt_recv_done(con, wc);
		} else if (imm_type == RTRS_HB_ACK_IMM) {
			WARN_ON(con->c.cid);
			sess->s.hb_missed_cnt = 0;
			if (sess->flags & RTRS_MSG_NEW_RKEY_F)
				return rtrs_clt_recv_done(con, wc);
		} else {
			rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
				 imm_type);
		}
		if (w_inval)
			/*
			 * Post x2 empty WRs: first is for this RDMA with IMM,
			 * second is for RECV with INV, which happened earlier.
			 */
			err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
		else
			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
		if (unlikely(err)) {
			rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
				 err);
			rtrs_rdma_error_recovery(con);
			break;
		}
		break;
	case IB_WC_RECV:
		/*
		 * Key invalidations from server side
		 */
		WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
			  wc->wc_flags & IB_WC_WITH_IMM));
		WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
		if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
			if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
				return rtrs_clt_recv_done(con, wc);

			return rtrs_clt_rkey_rsp_done(con, wc);
		}
		break;
	case IB_WC_RDMA_WRITE:
		/*
		 * post_send() RDMA write completions of IO reqs (read/write)
		 */
		break;

	default:
		rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
		return;
	}
}

static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
	int err, i;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	for (i = 0; i < q_size; i++) {
		if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
			struct rtrs_iu *iu = &con->rsp_ius[i];

			err = rtrs_iu_post_recv(&con->c, iu);
		} else {
			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
		}
		if (unlikely(err))
			return err;
	}

	return 0;
}

static int post_recv_sess(struct rtrs_clt_sess *sess)
{
	size_t q_size = 0;
	int err, cid;

	for (cid = 0; cid < sess->s.con_num; cid++) {
		if (cid == 0)
			q_size = SERVICE_CON_QUEUE_DEPTH;
		else
			q_size = sess->queue_depth;

		/*
		 * x2 for RDMA read responses + FR key invalidations,
		 * RDMA writes do not require any FR registrations.
		 */
		q_size *= 2;

		err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
		if (unlikely(err)) {
			rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
			return err;
		}
	}

	return 0;
}

struct path_it {
	int i;
	struct list_head skip_list;
	struct rtrs_clt *clt;
	struct rtrs_clt_sess *(*next_path)(struct path_it *it);
};

/**
 * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @memb: the name of the list_head within the struct.
 *
 * The next element is returned in round-robin fashion, i.e. the head is
 * skipped, but if the list is observed as empty, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
({ \
	list_next_or_null_rcu(head, ptr, type, memb) ?: \
		list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
				      type, memb); \
})
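
/*
 * Usage sketch (illustrative, mirrors get_next_path_rr() below): under
 * rcu_read_lock(), resume iteration from a remembered cursor and wrap
 * around past the head:
 *
 *	rcu_read_lock();
 *	path = list_next_or_null_rr_rcu(&clt->paths_list, &path->s.entry,
 *					typeof(*path), s.entry);
 *	rcu_read_unlock();
 *
 * The ?: fallback in the macro retries once from past the head, so
 * reaching the end of the list wraps to the first element; only an
 * empty list yields NULL.
 */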

/**
 * get_next_path_rr() - Returns path in round-robin fashion.
 * @it: the path pointer
 *
 * Related to @MP_POLICY_RR
 *
 * Locks:
 * rcu_read_lock() must be held.
 */
static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
{
	struct rtrs_clt_sess __rcu **ppcpu_path;
	struct rtrs_clt_sess *path;
	struct rtrs_clt *clt;

	clt = it->clt;

	/*
	 * Here we use two RCU objects: @paths_list and @pcpu_path
	 * pointer. See rtrs_clt_remove_path_from_arr() for details
	 * how that is handled.
	 */

	ppcpu_path = this_cpu_ptr(clt->pcpu_path);
	path = rcu_dereference(*ppcpu_path);
	if (unlikely(!path))
		path = list_first_or_null_rcu(&clt->paths_list,
					      typeof(*path), s.entry);
	else
		path = list_next_or_null_rr_rcu(&clt->paths_list,
						&path->s.entry,
						typeof(*path),
						s.entry);
	rcu_assign_pointer(*ppcpu_path, path);

	return path;
}

/**
 * get_next_path_min_inflight() - Returns path with minimal inflight count.
 * @it: the path pointer
 *
 * Related to @MP_POLICY_MIN_INFLIGHT
 *
 * Locks:
 * rcu_read_lock() must be held.
 */
static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
{
	struct rtrs_clt_sess *min_path = NULL;
	struct rtrs_clt *clt = it->clt;
	struct rtrs_clt_sess *sess;
	int min_inflight = INT_MAX;
	int inflight;

	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
			continue;

		if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
			continue;

		inflight = atomic_read(&sess->stats->inflight);

		if (inflight < min_inflight) {
			min_inflight = inflight;
			min_path = sess;
		}
	}

	/*
	 * add the path to the skip list, so that next time we can get
	 * a different one
	 */
	if (min_path)
		list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);

	return min_path;
}

static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
{
	INIT_LIST_HEAD(&it->skip_list);
	it->clt = clt;
	it->i = 0;

	if (clt->mp_policy == MP_POLICY_RR)
		it->next_path = get_next_path_rr;
	else
		it->next_path = get_next_path_min_inflight;
}

static inline void path_it_deinit(struct path_it *it)
{
	struct list_head *skip, *tmp;
	/*
	 * The skip_list is used only for the MIN_INFLIGHT policy.
	 * We need to remove paths from it, so that next IO can insert
	 * paths (->mp_skip_entry) into a skip_list again.
	 */
	list_for_each_safe(skip, tmp, &it->skip_list)
		list_del_init(skip);
}

/**
 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
 * about an inflight IO.
 * The user buffer holding the user control message (not data) is copied into
 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
 * also hold the control message of rtrs.
 * @req: an io request holding information about IO.
 * @sess: client session
 * @conf: confirmation callback function to notify upper layer.
 * @permit: permit for allocation of RDMA remote buffer
 * @priv: private pointer
 * @vec: kernel vector containing control message
 * @usr_len: length of the user message
 * @sg: scatter list for IO data
 * @sg_cnt: number of scatter list entries
 * @data_len: length of the IO data
 * @dir: direction of the IO.
 */
static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
			      struct rtrs_clt_sess *sess,
			      void (*conf)(void *priv, int errno),
			      struct rtrs_permit *permit, void *priv,
			      const struct kvec *vec, size_t usr_len,
			      struct scatterlist *sg, size_t sg_cnt,
			      size_t data_len, int dir)
{
	struct iov_iter iter;
	size_t len;

	req->permit = permit;
	req->in_use = true;
	req->usr_len = usr_len;
	req->data_len = data_len;
	req->sglist = sg;
	req->sg_cnt = sg_cnt;
	req->priv = priv;
	req->dir = dir;
	req->con = rtrs_permit_to_clt_con(sess, permit);
	req->conf = conf;
	req->need_inv = false;
	req->need_inv_comp = false;
	req->inv_errno = 0;

	iov_iter_kvec(&iter, READ, vec, 1, usr_len);
	len = _copy_from_iter(req->iu->buf, usr_len, &iter);
	WARN_ON(len != usr_len);

	reinit_completion(&req->inv_comp);
}

static struct rtrs_clt_io_req *
rtrs_clt_get_req(struct rtrs_clt_sess *sess,
		 void (*conf)(void *priv, int errno),
		 struct rtrs_permit *permit, void *priv,
		 const struct kvec *vec, size_t usr_len,
		 struct scatterlist *sg, size_t sg_cnt,
		 size_t data_len, int dir)
{
	struct rtrs_clt_io_req *req;

	req = &sess->reqs[permit->mem_id];
	rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
			  sg, sg_cnt, data_len, dir);
	return req;
}

static struct rtrs_clt_io_req *
rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
		      struct rtrs_clt_io_req *fail_req)
{
	struct rtrs_clt_io_req *req;
	struct kvec vec = {
		.iov_base = fail_req->iu->buf,
		.iov_len  = fail_req->usr_len
	};

	req = &alive_sess->reqs[fail_req->permit->mem_id];
	rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
			  fail_req->priv, &vec, fail_req->usr_len,
			  fail_req->sglist, fail_req->sg_cnt,
			  fail_req->data_len, fail_req->dir);
	return req;
}

static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
				   struct rtrs_clt_io_req *req,
				   struct rtrs_rbuf *rbuf,
				   u32 size, u32 imm)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct ib_sge *sge = req->sge;
	enum ib_send_flags flags;
	struct scatterlist *sg;
	size_t num_sge;
	int i;

	for_each_sg(req->sglist, sg, req->sg_cnt, i) {
		sge[i].addr   = sg_dma_address(sg);
		sge[i].length = sg_dma_len(sg);
		sge[i].lkey   = sess->s.dev->ib_pd->local_dma_lkey;
	}
	sge[i].addr   = req->iu->dma_addr;
	sge[i].length = size;
	sge[i].lkey   = sess->s.dev->ib_pd->local_dma_lkey;

	num_sge = 1 + req->sg_cnt;

	/*
	 * From time to time we have to post signalled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
	flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
			0 : IB_SEND_SIGNALED;

	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
				      size, DMA_TO_DEVICE);

	return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
					   rbuf->rkey, rbuf->addr, imm,
					   flags, NULL);
}

static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_clt_sess *sess = to_clt_sess(s);
	struct rtrs_msg_rdma_write *msg;

	struct rtrs_rbuf *rbuf;
	int ret, count = 0;
	u32 imm, buf_id;

	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;

	if (unlikely(tsize > sess->chunk_size)) {
		rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
			 tsize, sess->chunk_size);
		return -EMSGSIZE;
	}
	if (req->sg_cnt) {
		count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
				      req->sg_cnt, req->dir);
		if (unlikely(!count)) {
			rtrs_wrn(s, "Write request failed, map failed\n");
			return -EINVAL;
		}
	}
	/* put rtrs msg after sg and user message */
	msg = req->iu->buf + req->usr_len;
	msg->type = cpu_to_le16(RTRS_MSG_WRITE);
	msg->usr_len = cpu_to_le16(req->usr_len);

	/* rtrs message on server side will be after user data and message */
	imm = req->permit->mem_off + req->data_len + req->usr_len;
	imm = rtrs_to_io_req_imm(imm);
	buf_id = req->permit->mem_id;
	req->sg_size = tsize;
	rbuf = &sess->rbufs[buf_id];

	/*
	 * Update stats now. After the request is successfully sent it is
	 * not safe anymore to touch it.
	 */
	rtrs_clt_update_all_stats(req, WRITE);

	ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
				      req->usr_len + sizeof(*msg),
				      imm);
	if (unlikely(ret)) {
		rtrs_err(s, "Write request failed: %d\n", ret);
		if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
			atomic_dec(&sess->stats->inflight);
		if (req->sg_cnt)
			ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
					req->sg_cnt, req->dir);
	}

	return ret;
}

static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
{
	int nr;

	/* Align the MR to a 4K page size to match the block virt boundary */
	nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
	if (nr < 0)
		return nr;
	if (unlikely(nr < req->sg_cnt))
		return -EINVAL;
	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	return nr;
}

static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_clt_sess *sess = to_clt_sess(s);
	struct rtrs_msg_rdma_read *msg;
	struct rtrs_ib_dev *dev;

	struct ib_reg_wr rwr;
	struct ib_send_wr *wr = NULL;

	int ret, count = 0;
	u32 imm, buf_id;

	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;

	s = &sess->s;
	dev = sess->s.dev;

	if (unlikely(tsize > sess->chunk_size)) {
		rtrs_wrn(s,
			 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
			 tsize, sess->chunk_size);
		return -EMSGSIZE;
	}

	if (req->sg_cnt) {
		count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
				      req->dir);
		if (unlikely(!count)) {
			rtrs_wrn(s,
				 "Read request failed, dma map failed\n");
			return -EINVAL;
		}
	}
	/* put our message into req->buf after the user message */
	msg = req->iu->buf + req->usr_len;
	msg->type = cpu_to_le16(RTRS_MSG_READ);
	msg->usr_len = cpu_to_le16(req->usr_len);

	if (count) {
		ret = rtrs_map_sg_fr(req, count);
		if (ret < 0) {
			rtrs_err_rl(s,
				    "Read request failed, failed to map fast reg. data, err: %d\n",
				    ret);
			ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
					req->dir);
			return ret;
		}
		rwr = (struct ib_reg_wr) {
			.wr.opcode = IB_WR_REG_MR,
			.wr.wr_cqe = &fast_reg_cqe,
			.mr = req->mr,
			.key = req->mr->rkey,
			.access = (IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE),
		};
		wr = &rwr.wr;

		msg->sg_cnt = cpu_to_le16(1);
		msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);

		msg->desc[0].addr = cpu_to_le64(req->mr->iova);
		msg->desc[0].key = cpu_to_le32(req->mr->rkey);
		msg->desc[0].len = cpu_to_le32(req->mr->length);

		/* Further invalidation is required */
		req->need_inv = !!RTRS_MSG_NEED_INVAL_F;

	} else {
		msg->sg_cnt = 0;
		msg->flags = 0;
	}
	/*
	 * rtrs message will be after the space reserved for disk data and
	 * user message
	 */
	imm = req->permit->mem_off + req->data_len + req->usr_len;
	imm = rtrs_to_io_req_imm(imm);
	buf_id = req->permit->mem_id;

	req->sg_size = sizeof(*msg);
	req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
	req->sg_size += req->usr_len;

	/*
	 * Update stats now. After the request is successfully sent it is
	 * not safe anymore to touch it.
	 */
	rtrs_clt_update_all_stats(req, READ);

	ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
				  req->data_len, imm, wr);
	if (unlikely(ret)) {
		rtrs_err(s, "Read request failed: %d\n", ret);
		if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
			atomic_dec(&sess->stats->inflight);
		req->need_inv = false;
		if (req->sg_cnt)
			ib_dma_unmap_sg(dev->ib_dev, req->sglist,
					req->sg_cnt, req->dir);
	}

	return ret;
}

/**
 * rtrs_clt_failover_req() - Try to find an active path for a failed request
 * @clt: clt context
 * @fail_req: a failed io request.
 */
rtrs_clt_failover_req(struct rtrs_clt * clt,struct rtrs_clt_io_req * fail_req)1170*4882a593Smuzhiyun static int rtrs_clt_failover_req(struct rtrs_clt *clt,
1171*4882a593Smuzhiyun struct rtrs_clt_io_req *fail_req)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun struct rtrs_clt_sess *alive_sess;
1174*4882a593Smuzhiyun struct rtrs_clt_io_req *req;
1175*4882a593Smuzhiyun int err = -ECONNABORTED;
1176*4882a593Smuzhiyun struct path_it it;
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun rcu_read_lock();
1179*4882a593Smuzhiyun for (path_it_init(&it, clt);
1180*4882a593Smuzhiyun (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
1181*4882a593Smuzhiyun it.i++) {
1182*4882a593Smuzhiyun if (unlikely(READ_ONCE(alive_sess->state) !=
1183*4882a593Smuzhiyun RTRS_CLT_CONNECTED))
1184*4882a593Smuzhiyun continue;
1185*4882a593Smuzhiyun req = rtrs_clt_get_copy_req(alive_sess, fail_req);
1186*4882a593Smuzhiyun if (req->dir == DMA_TO_DEVICE)
1187*4882a593Smuzhiyun err = rtrs_clt_write_req(req);
1188*4882a593Smuzhiyun else
1189*4882a593Smuzhiyun err = rtrs_clt_read_req(req);
1190*4882a593Smuzhiyun if (unlikely(err)) {
1191*4882a593Smuzhiyun req->in_use = false;
1192*4882a593Smuzhiyun continue;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun /* Success path */
1195*4882a593Smuzhiyun rtrs_clt_inc_failover_cnt(alive_sess->stats);
1196*4882a593Smuzhiyun break;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun path_it_deinit(&it);
1199*4882a593Smuzhiyun rcu_read_unlock();
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun return err;
1202*4882a593Smuzhiyun }

static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt *clt = sess->clt;
	struct rtrs_clt_io_req *req;
	int i, err;

	if (!sess->reqs)
		return;
	for (i = 0; i < sess->queue_depth; ++i) {
		req = &sess->reqs[i];
		if (!req->in_use)
			continue;

		/*
		 * Safely (without notification) complete failed request.
		 * After completion this request is still usable and can
		 * be failed over to another path.
		 */
		complete_rdma_req(req, -ECONNABORTED, false, true);

		err = rtrs_clt_failover_req(clt, req);
		if (unlikely(err))
			/* Failover failed, notify anyway */
			req->conf(req->priv, err);
	}
}

static void free_sess_reqs(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt_io_req *req;
	int i;

	if (!sess->reqs)
		return;
	for (i = 0; i < sess->queue_depth; ++i) {
		req = &sess->reqs[i];
		if (req->mr)
			ib_dereg_mr(req->mr);
		kfree(req->sge);
		rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
	}
	kfree(sess->reqs);
	sess->reqs = NULL;
}

static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt_io_req *req;
	struct rtrs_clt *clt = sess->clt;
	int i, err = -ENOMEM;

	sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
			     GFP_KERNEL);
	if (!sess->reqs)
		return -ENOMEM;

	for (i = 0; i < sess->queue_depth; ++i) {
		req = &sess->reqs[i];
		req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
					sess->s.dev->ib_dev,
					DMA_TO_DEVICE,
					rtrs_clt_rdma_done);
		if (!req->iu)
			goto out;

		req->sge = kmalloc_array(clt->max_segments + 1,
					 sizeof(*req->sge), GFP_KERNEL);
		if (!req->sge)
			goto out;

		req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
				      sess->max_pages_per_mr);
		if (IS_ERR(req->mr)) {
			err = PTR_ERR(req->mr);
			req->mr = NULL;
			pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
			       sess->max_pages_per_mr);
			goto out;
		}

		init_completion(&req->inv_comp);
	}

	return 0;

out:
	free_sess_reqs(sess);

	return err;
}

static int alloc_permits(struct rtrs_clt *clt)
{
	unsigned int chunk_bits;
	int err, i;

	clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
				   sizeof(long), GFP_KERNEL);
	if (!clt->permits_map) {
		err = -ENOMEM;
		goto out_err;
	}
	clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
	if (!clt->permits) {
		err = -ENOMEM;
		goto err_map;
	}
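	/*
	 * Illustrative note (assumption, not from the original source):
	 * chunk_bits is the number of bits needed to address queue_depth
	 * buffers, e.g. queue_depth = 128 gives ilog2(127) + 1 = 7. The
	 * mem_off computed below shifts each mem_id into the top bits of
	 * the immediate payload, leaving the low bits free for the byte
	 * offset inside the chunk.
	 */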
	chunk_bits = ilog2(clt->queue_depth - 1) + 1;
	for (i = 0; i < clt->queue_depth; i++) {
		struct rtrs_permit *permit;

		permit = get_permit(clt, i);
		permit->mem_id = i;
		permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
	}

	return 0;

err_map:
	kfree(clt->permits_map);
	clt->permits_map = NULL;
out_err:
	return err;
}

static void free_permits(struct rtrs_clt *clt)
{
	if (clt->permits_map) {
		size_t sz = clt->queue_depth;

		wait_event(clt->permits_wait,
			   find_first_bit(clt->permits_map, sz) >= sz);
	}
	kfree(clt->permits_map);
	clt->permits_map = NULL;
	kfree(clt->permits);
	clt->permits = NULL;
}

static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
{
	struct ib_device *ib_dev;
	u64 max_pages_per_mr;
	int mr_page_shift;

	ib_dev = sess->s.dev->ib_dev;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
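	/*
	 * Worked example (illustrative assumption): if the lowest set bit
	 * of page_size_cap is the 4 KiB page, mr_page_shift = max(12, 12)
	 * = 12. With attrs.max_mr_size = 4 GiB this gives max_pages_per_mr
	 * = 2^32 / 2^12 = 1048576 pages, which is then clamped by the
	 * device's fast-reg page list limit in the min3() below.
	 */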
	mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
	max_pages_per_mr = ib_dev->attrs.max_mr_size;
	do_div(max_pages_per_mr, (1ull << mr_page_shift));
	sess->max_pages_per_mr =
		min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
		     ib_dev->attrs.max_fast_reg_page_list_len);
	sess->max_send_sge = ib_dev->attrs.max_send_sge;
}

static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
					  enum rtrs_clt_state new_state,
					  enum rtrs_clt_state *old_state)
{
	bool changed;

	spin_lock_irq(&sess->state_wq.lock);
	*old_state = sess->state;
	changed = __rtrs_clt_change_state(sess, new_state);
	spin_unlock_irq(&sess->state_wq.lock);

	return changed;
}

static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
				  enum rtrs_clt_state new_state)
{
	enum rtrs_clt_state old_state;

	return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
}

static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
	struct rtrs_clt_con *con = container_of(c, typeof(*con), c);

	rtrs_rdma_error_recovery(con);
}

static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
{
	rtrs_init_hb(&sess->s, &io_comp_cqe,
		     RTRS_HB_INTERVAL_MS,
		     RTRS_HB_MISSED_MAX,
		     rtrs_clt_hb_err_handler,
		     rtrs_wq);
}

static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
{
	rtrs_start_hb(&sess->s);
}

static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
{
	rtrs_stop_hb(&sess->s);
}

static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);

static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
					const struct rtrs_addr *path,
					size_t con_num, u16 max_segments,
					size_t max_segment_size)
{
	struct rtrs_clt_sess *sess;
	int err = -ENOMEM;
	int cpu;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		goto err;

	/* Extra connection for user messages */
	con_num += 1;

	sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
	if (!sess->s.con)
		goto err_free_sess;

	sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
	if (!sess->stats)
		goto err_free_con;

	mutex_init(&sess->init_mutex);
	uuid_gen(&sess->s.uuid);
	memcpy(&sess->s.dst_addr, path->dst,
	       rdma_addr_size((struct sockaddr *)path->dst));

	/*
	 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
	 * checks that the sa_family is non-zero. If the user passed
	 * src_addr=NULL, sess->src_addr will contain only zeros, which
	 * is fine.
	 */
	if (path->src)
		memcpy(&sess->s.src_addr, path->src,
		       rdma_addr_size((struct sockaddr *)path->src));
	strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
	sess->s.con_num = con_num;
	sess->clt = clt;
	sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
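	/*
	 * Illustrative note (assumption): the shift by 12 converts bytes
	 * to 4 KiB pages, e.g. 128 segments * 4096 B >> 12 = 128 pages.
	 * query_fast_reg_mode() may later lower this to what the HCA
	 * actually supports.
	 */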
	init_waitqueue_head(&sess->state_wq);
	sess->state = RTRS_CLT_CONNECTING;
	atomic_set(&sess->connected_cnt, 0);
	INIT_WORK(&sess->close_work, rtrs_clt_close_work);
	INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
	rtrs_clt_init_hb(sess);

	sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
	if (!sess->mp_skip_entry)
		goto err_free_stats;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));

	err = rtrs_clt_init_stats(sess->stats);
	if (err)
		goto err_free_percpu;

	return sess;

err_free_percpu:
	free_percpu(sess->mp_skip_entry);
err_free_stats:
	kfree(sess->stats);
err_free_con:
	kfree(sess->s.con);
err_free_sess:
	kfree(sess);
err:
	return ERR_PTR(err);
}

void free_sess(struct rtrs_clt_sess *sess)
{
	free_percpu(sess->mp_skip_entry);
	mutex_destroy(&sess->init_mutex);
	kfree(sess->s.con);
	kfree(sess->rbufs);
	kfree(sess);
}

static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
{
	struct rtrs_clt_con *con;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	/* Map first two connections to the first CPU */
	con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
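	/*
	 * Illustrative note (assumption): cid 0 (the user/service
	 * connection) and cid 1 both map to CPU 0, cid 2 to CPU 1, and
	 * so on, wrapping at nr_cpu_ids.
	 */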
	con->c.cid = cid;
	con->c.sess = &sess->s;
	atomic_set(&con->io_cnt, 0);

	sess->s.con[cid] = &con->c;

	return 0;
}

static void destroy_con(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	sess->s.con[con->c.cid] = NULL;
	kfree(con);
}

static int create_con_cq_qp(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	u32 max_send_wr, max_recv_wr, cq_size;
	int err, cq_vector;
	struct rtrs_msg_rkey_rsp *rsp;

	/*
	 * This function can fail, but destroy_con_cq_qp() should still
	 * be called. Since create_con_cq_qp() runs on the cm event path,
	 * the caller/waiter never knows whether we failed before or after
	 * it ran. To solve this dilemma without introducing additional
	 * flags, simply allow destroy_con_cq_qp() to be called many times.
	 */

	if (con->c.cid == 0) {
		/*
		 * One completion for each receive and two for each send
		 * (send request + registration)
		 * + 2 for drain and heartbeat
		 * in case qp gets into error state
		 */
		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
		/* We must be the first here */
		if (WARN_ON(sess->s.dev))
			return -EINVAL;

		/*
		 * The whole session uses device from user connection.
		 * Be careful not to close user connection before ib dev
		 * is gracefully put.
		 */
		sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
						      &dev_pd);
		if (!sess->s.dev) {
			rtrs_wrn(sess->clt,
				 "rtrs_ib_dev_find_or_add(): no memory\n");
			return -ENOMEM;
		}
		sess->s.dev_ref = 1;
		query_fast_reg_mode(sess);
	} else {
		/*
		 * Here we assume that session members are correctly set.
		 * This is always true if user connection (cid == 0) is
		 * established first.
		 */
		if (WARN_ON(!sess->s.dev))
			return -EINVAL;
		if (WARN_ON(!sess->queue_depth))
			return -EINVAL;

		/* Shared between connections */
		sess->s.dev_ref++;
		max_send_wr =
			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
			      sess->queue_depth * 3 + 1);
		max_recv_wr =
			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
			      sess->queue_depth * 3 + 1);
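		/*
		 * Worked example (illustrative assumption): with a
		 * queue_depth of 128 the budget is 128 * 3 + 1 = 385 send
		 * and receive WRs each, unless the device's max_qp_wr is
		 * smaller and wins in min_t() above.
		 */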
	}
	/* alloc iu to recv new rkey reply when server reports flags set */
	if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
		con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
					     GFP_KERNEL, sess->s.dev->ib_dev,
					     DMA_FROM_DEVICE,
					     rtrs_clt_rdma_done);
		if (!con->rsp_ius)
			return -ENOMEM;
		con->queue_size = max_recv_wr;
	}
	cq_size = max_send_wr + max_recv_wr;
	cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
	err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
				cq_vector, cq_size, max_send_wr,
				max_recv_wr, IB_POLL_SOFTIRQ);
	/*
	 * In case of error we do not bother to clean previous allocations,
	 * since destroy_con_cq_qp() must be called.
	 */
	return err;
}

static void destroy_con_cq_qp(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	/*
	 * Be careful here: destroy_con_cq_qp() can be called even if
	 * create_con_cq_qp() failed, see comments there.
	 */

	rtrs_cq_qp_destroy(&con->c);
	if (con->rsp_ius) {
		rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
		con->rsp_ius = NULL;
		con->queue_size = 0;
	}
	if (sess->s.dev_ref && !--sess->s.dev_ref) {
		rtrs_ib_dev_put(sess->s.dev);
		sess->s.dev = NULL;
	}
}

static void stop_cm(struct rtrs_clt_con *con)
{
	rdma_disconnect(con->c.cm_id);
	if (con->c.qp)
		ib_drain_qp(con->c.qp);
}

static void destroy_cm(struct rtrs_clt_con *con)
{
	rdma_destroy_id(con->c.cm_id);
	con->c.cm_id = NULL;
}

static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
{
	struct rtrs_sess *s = con->c.sess;
	int err;

	err = create_con_cq_qp(con);
	if (err) {
		rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
		return err;
	}
	err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
	if (err)
		rtrs_err(s, "Resolving route failed, err: %d\n", err);

	return err;
}

static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt *clt = sess->clt;
	struct rtrs_msg_conn_req msg;
	struct rdma_conn_param param;

	int err;

	param = (struct rdma_conn_param) {
		.retry_count = 7,
		.rnr_retry_count = 7,
		.private_data = &msg,
		.private_data_len = sizeof(msg),
	};

	msg = (struct rtrs_msg_conn_req) {
		.magic = cpu_to_le16(RTRS_MAGIC),
		.version = cpu_to_le16(RTRS_PROTO_VER),
		.cid = cpu_to_le16(con->c.cid),
		.cid_num = cpu_to_le16(sess->s.con_num),
		.recon_cnt = cpu_to_le16(sess->s.recon_cnt),
	};
	msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
	uuid_copy(&msg.sess_uuid, &sess->s.uuid);
	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);

	err = rdma_connect_locked(con->c.cm_id, &param);
	if (err)
		rtrs_err(clt, "rdma_connect_locked(): %d\n", err);

	return err;
}

static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
				      struct rdma_cm_event *ev)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt *clt = sess->clt;
	const struct rtrs_msg_conn_rsp *msg;
	u16 version, queue_depth;
	int errno;
	u8 len;

	msg = ev->param.conn.private_data;
	len = ev->param.conn.private_data_len;
	if (len < sizeof(*msg)) {
		rtrs_err(clt, "Invalid RTRS connection response\n");
		return -ECONNRESET;
	}
	if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
		rtrs_err(clt, "Invalid RTRS magic\n");
		return -ECONNRESET;
	}
	version = le16_to_cpu(msg->version);
	if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
		rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
			 version >> 8, RTRS_PROTO_VER_MAJOR);
		return -ECONNRESET;
	}
	errno = le16_to_cpu(msg->errno);
	if (errno) {
		rtrs_err(clt, "Invalid RTRS message: errno %d\n",
			 errno);
		return -ECONNRESET;
	}
	if (con->c.cid == 0) {
		queue_depth = le16_to_cpu(msg->queue_depth);

		if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
			rtrs_err(clt, "Error: queue depth changed\n");

			/*
			 * Stop any more reconnection attempts
			 */
			sess->reconnect_attempts = -1;
			rtrs_err(clt,
				 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
			return -ECONNRESET;
		}

		if (!sess->rbufs) {
			sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
					      GFP_KERNEL);
			if (!sess->rbufs)
				return -ENOMEM;
		}
		sess->queue_depth = queue_depth;
		sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
		sess->max_io_size = le32_to_cpu(msg->max_io_size);
		sess->flags = le32_to_cpu(msg->flags);
		sess->chunk_size = sess->max_io_size + sess->max_hdr_size;

		/*
		 * Global IO size is always a minimum.
		 * If during a reconnection the server sends us a slightly
		 * higher value, the client does not care and keeps using
		 * the cached minimum.
		 *
		 * Since we can have several sessions (paths) re-establishing
		 * connections in parallel, use lock.
		 */
		mutex_lock(&clt->paths_mutex);
		clt->queue_depth = sess->queue_depth;
		clt->max_io_size = min_not_zero(sess->max_io_size,
						clt->max_io_size);
		mutex_unlock(&clt->paths_mutex);

		/*
		 * Cache the hca_port and hca_name for sysfs
		 */
		sess->hca_port = con->c.cm_id->port_num;
		scnprintf(sess->hca_name, sizeof(sess->hca_name), "%s",
			  sess->s.dev->ib_dev->name);
		sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
		/* set for_new_clt, to allow future reconnect on any path */
		sess->for_new_clt = 1;
	}

	return 0;
}

static inline void flag_success_on_conn(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);

	atomic_inc(&sess->connected_cnt);
	con->cm_err = 1;
}

static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
				   struct rdma_cm_event *ev)
{
	struct rtrs_sess *s = con->c.sess;
	const struct rtrs_msg_conn_rsp *msg;
	const char *rej_msg;
	int status, errno;
	u8 data_len;

	status = ev->status;
	rej_msg = rdma_reject_msg(con->c.cm_id, status);
	msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);

	if (msg && data_len >= sizeof(*msg)) {
		errno = (int16_t)le16_to_cpu(msg->errno);
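		/*
		 * Illustrative note (assumption): the cast through int16_t
		 * sign-extends the 16-bit wire value, so a server-side
		 * -EBUSY transported in an unsigned field is recovered as
		 * a negative errno here.
		 */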
		if (errno == -EBUSY)
			rtrs_err(s,
				 "Previous session still exists on the server, please reconnect later\n");
		else
			rtrs_err(s,
				 "Connect rejected: status %d (%s), rtrs errno %d\n",
				 status, rej_msg, errno);
	} else {
		rtrs_err(s,
			 "Connect rejected but with malformed message: status %d (%s)\n",
			 status, rej_msg);
	}

	return -ECONNRESET;
}

static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
{
	if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
		queue_work(rtrs_wq, &sess->close_work);
	if (wait)
		flush_work(&sess->close_work);
}

static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
{
	if (con->cm_err == 1) {
		struct rtrs_clt_sess *sess;

		sess = to_clt_sess(con->c.sess);
		if (atomic_dec_and_test(&sess->connected_cnt))
			wake_up(&sess->state_wq);
	}
	con->cm_err = cm_err;
}

static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *ev)
{
	struct rtrs_clt_con *con = cm_id->context;
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_clt_sess *sess = to_clt_sess(s);
	int cm_err = 0;

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_err = rtrs_rdma_addr_resolved(con);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_err = rtrs_rdma_route_resolved(con);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		cm_err = rtrs_rdma_conn_established(con, ev);
		if (likely(!cm_err)) {
			/*
			 * Report success and wake up. Here we abuse state_wq,
			 * i.e. wake up without state change, but we set cm_err.
			 */
			flag_success_on_conn(con);
			wake_up(&sess->state_wq);
			return 0;
		}
		break;
	case RDMA_CM_EVENT_REJECTED:
		cm_err = rtrs_rdma_conn_rejected(con, ev);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		rtrs_wrn(s, "CM error event %d\n", ev->event);
		cm_err = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
		cm_err = -EHOSTUNREACH;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		cm_err = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * Device removal is a special case. Queue close and return 0.
		 */
		rtrs_clt_close_conns(sess, false);
		return 0;
	default:
		rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
		cm_err = -ECONNRESET;
		break;
	}

	if (cm_err) {
		/*
		 * cm error makes sense only on connection establishing,
		 * in other cases we rely on normal procedure of reconnecting.
		 */
		flag_error_on_conn(con, cm_err);
		rtrs_rdma_error_recovery(con);
	}

	return 0;
}

static int create_cm(struct rtrs_clt_con *con)
{
	struct rtrs_sess *s = con->c.sess;
	struct rtrs_clt_sess *sess = to_clt_sess(s);
	struct rdma_cm_id *cm_id;
	int err;

	cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
			       sess->s.dst_addr.ss_family == AF_IB ?
			       RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		err = PTR_ERR(cm_id);
		rtrs_err(s, "Failed to create CM ID, err: %d\n", err);

		return err;
	}
	con->c.cm_id = cm_id;
	con->cm_err = 0;
	/* allow the port to be reused */
	err = rdma_set_reuseaddr(cm_id, 1);
	if (err != 0) {
		rtrs_err(s, "Set address reuse failed, err: %d\n", err);
		goto destroy_cm;
	}
	err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
				(struct sockaddr *)&sess->s.dst_addr,
				RTRS_CONNECT_TIMEOUT_MS);
	if (err) {
		rtrs_err(s, "Failed to resolve address, err: %d\n", err);
		goto destroy_cm;
	}
	/*
	 * Combine connection status and session events. This is needed
	 * for waiting two possible cases: cm_err has something meaningful
	 * or session state was really changed to error by device removal.
	 */
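	/*
	 * Illustrative note (standard kernel semantics):
	 * wait_event_interruptible_timeout() returns 0 on timeout,
	 * -ERESTARTSYS when interrupted by a signal, and otherwise the
	 * remaining jiffies (at least 1) once the condition became true,
	 * which is why only the first two cases are treated as errors
	 * below.
	 */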
	err = wait_event_interruptible_timeout(
			sess->state_wq,
			con->cm_err || sess->state != RTRS_CLT_CONNECTING,
			msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
	if (err == 0 || err == -ERESTARTSYS) {
		if (err == 0)
			err = -ETIMEDOUT;
		/* Timed out or interrupted */
		goto errr;
	}
	if (con->cm_err < 0) {
		err = con->cm_err;
		goto errr;
	}
	if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
		/* Device removal */
		err = -ECONNABORTED;
		goto errr;
	}

	return 0;

errr:
	stop_cm(con);
	/* It is safe to call destroy even if cq_qp is not inited */
	destroy_con_cq_qp(con);
destroy_cm:
	destroy_cm(con);

	return err;
}

static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt *clt = sess->clt;
	int up;

	/*
	 * We can fire RECONNECTED event only when all paths were
	 * connected on rtrs_clt_open(), then each was disconnected
	 * and the first one connected again. That's why this nasty
	 * game with counter value.
	 */

	mutex_lock(&clt->paths_ev_mutex);
	up = ++clt->paths_up;
	/*
	 * Here it is safe to access paths num directly since up counter
	 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
	 * in progress, thus paths removals are impossible.
	 */
	if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
		clt->paths_up = clt->paths_num;
	else if (up == 1)
		clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
	mutex_unlock(&clt->paths_ev_mutex);

	/* Mark session as established */
	sess->established = true;
	sess->reconnect_attempts = 0;
	sess->stats->reconnects.successful_cnt++;
}

static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt *clt = sess->clt;

	if (!sess->established)
		return;

	sess->established = false;
	mutex_lock(&clt->paths_ev_mutex);
	WARN_ON(!clt->paths_up);
	if (--clt->paths_up == 0)
		clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
	mutex_unlock(&clt->paths_ev_mutex);
}

static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt_con *con;
	unsigned int cid;

	WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);

	/*
	 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
	 * exactly in between. Start destroying after it finishes.
	 */
	mutex_lock(&sess->init_mutex);
	mutex_unlock(&sess->init_mutex);

	/*
	 * All IO paths must observe !CONNECTED state before we
	 * free everything.
	 */
	synchronize_rcu();

	rtrs_clt_stop_hb(sess);

	/*
	 * The order is utterly crucial: firstly disconnect and complete all
	 * rdma requests with error (thus set in_use=false for requests),
	 * then fail outstanding requests checking in_use for each, and
	 * eventually notify upper layer about session disconnection.
	 */

	for (cid = 0; cid < sess->s.con_num; cid++) {
		if (!sess->s.con[cid])
			break;
		con = to_clt_con(sess->s.con[cid]);
		stop_cm(con);
	}
	fail_all_outstanding_reqs(sess);
	free_sess_reqs(sess);
	rtrs_clt_sess_down(sess);

	/*
	 * Wait for graceful shutdown, namely when peer side invokes
	 * rdma_disconnect(). 'connected_cnt' is decremented only on
	 * CM events, thus if the other side has crashed and hb has
	 * detected that something is wrong, we will be stuck here for
	 * exactly the timeout, since CM does not fire anything. That is
	 * fine, we are not in a hurry.
	 */
	wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
			   msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));

	for (cid = 0; cid < sess->s.con_num; cid++) {
		if (!sess->s.con[cid])
			break;
		con = to_clt_con(sess->s.con[cid]);
		destroy_con_cq_qp(con);
		destroy_cm(con);
		destroy_con(con);
	}
}

static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
				 struct rtrs_clt_sess *sess,
				 struct rtrs_clt_sess *next)
{
	struct rtrs_clt_sess **ppcpu_path;

	/* Call cmpxchg() without sparse warnings */
	ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
	return sess == cmpxchg(ppcpu_path, sess, next);
}

static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt *clt = sess->clt;
	struct rtrs_clt_sess *next;
	bool wait_for_grace = false;
	int cpu;

	mutex_lock(&clt->paths_mutex);
	list_del_rcu(&sess->s.entry);

	/* Make sure everybody observes path removal. */
	synchronize_rcu();

	/*
	 * At this point nobody sees @sess in the list, but still we have
	 * dangling pointer @pcpu_path which _can_ point to @sess. Since
	 * nobody can observe @sess in the list, we guarantee that IO path
	 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
	 * to @sess, but can never again become @sess.
	 */

	/*
	 * Decrement paths number only after grace period, because
	 * caller of do_each_path() must firstly observe list without
	 * path and only then decremented paths number.
	 *
	 * Otherwise there can be the following situation:
	 *    o Two paths exist and IO is coming.
	 *    o One path is removed:
	 *      CPU#0                          CPU#1
	 *      do_each_path():                rtrs_clt_remove_path_from_arr():
	 *          path = get_next_path()
	 *          ^^^                            list_del_rcu(path)
	 *          [!CONNECTED path]              clt->paths_num--
	 *                                              ^^^^^^^^^
	 *      load clt->paths_num from 2 to 1
	 *                    ^^^^^^^^^
	 *                    sees 1
	 *
	 *      path is observed as !CONNECTED, but do_each_path() loop
	 *      ends, because expression i < clt->paths_num is false.
	 */
	clt->paths_num--;

	/*
	 * Get @next connection from current @sess which is going to be
	 * removed. If @sess is the last element, then @next is NULL.
	 */
	rcu_read_lock();
	next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
					typeof(*next), s.entry);
	rcu_read_unlock();

	/*
	 * @pcpu paths can still point to the path which is going to be
	 * removed, so change the pointer manually.
	 */
	for_each_possible_cpu(cpu) {
		struct rtrs_clt_sess __rcu **ppcpu_path;

		ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
		if (rcu_dereference_protected(*ppcpu_path,
			lockdep_is_held(&clt->paths_mutex)) != sess)
			/*
			 * synchronize_rcu() was called just after deleting
			 * entry from the list, thus IO code path cannot
			 * change pointer back to the pointer which is going
			 * to be removed, we are safe here.
			 */
			continue;

		/*
		 * We race with IO code path, which also changes pointer,
		 * thus we have to be careful not to overwrite it.
		 */
		if (xchg_sessions(ppcpu_path, sess, next))
			/*
			 * @ppcpu_path was successfully replaced with @next,
			 * that means that someone could also pick up the
			 * @sess and dereference it right now, so waiting
			 * for a grace period is required.
			 */
			wait_for_grace = true;
	}
	if (wait_for_grace)
		synchronize_rcu();

	mutex_unlock(&clt->paths_mutex);
}

static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
				     struct rtrs_addr *addr)
{
	struct rtrs_clt *clt = sess->clt;

	mutex_lock(&clt->paths_mutex);
	clt->paths_num++;

	list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
	mutex_unlock(&clt->paths_mutex);
}

static void rtrs_clt_close_work(struct work_struct *work)
{
	struct rtrs_clt_sess *sess;

	sess = container_of(work, struct rtrs_clt_sess, close_work);

	cancel_delayed_work_sync(&sess->reconnect_dwork);
	rtrs_clt_stop_and_destroy_conns(sess);
	rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
}

static int init_conns(struct rtrs_clt_sess *sess)
{
	unsigned int cid;
	int err;

	/*
	 * On every new session connection increase the reconnect counter
	 * to avoid clashes with previous sessions not yet closed on the
	 * server side.
	 */
	sess->s.recon_cnt++;

	/* Establish all RDMA connections */
	for (cid = 0; cid < sess->s.con_num; cid++) {
		err = create_con(sess, cid);
		if (err)
			goto destroy;

		err = create_cm(to_clt_con(sess->s.con[cid]));
		if (err) {
			destroy_con(to_clt_con(sess->s.con[cid]));
			goto destroy;
		}
	}
	err = alloc_sess_reqs(sess);
	if (err)
		goto destroy;

	rtrs_clt_start_hb(sess);

	return 0;

destroy:
	while (cid--) {
		struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);

		stop_cm(con);
		destroy_con_cq_qp(con);
		destroy_cm(con);
		destroy_con(con);
	}
	/*
	 * If we've never taken the async path and got an error, say,
	 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
	 * manually to keep reconnecting.
	 */
	rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);

	return err;
}

static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_con *con = cq->cq_context;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_iu *iu;

	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
	rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(sess->clt, "Sess info request send failed: %s\n",
			 ib_wc_status_msg(wc->status));
		rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
		return;
	}

	rtrs_clt_update_wc_stats(con);
}

static int process_info_rsp(struct rtrs_clt_sess *sess,
			    const struct rtrs_msg_info_rsp *msg)
{
	unsigned int sg_cnt, total_len;
	int i, sgi;

	sg_cnt = le16_to_cpu(msg->sg_cnt);
	if (unlikely(!sg_cnt))
		return -EINVAL;
	/*
	 * Check if IB immediate data size is enough to hold the mem_id and
	 * the offset inside the memory chunk.
	 */
	if (unlikely((ilog2(sg_cnt - 1) + 1) +
		     (ilog2(sess->chunk_size - 1) + 1) >
		     MAX_IMM_PAYL_BITS)) {
		rtrs_err(sess->clt,
			 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
			 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
		return -EINVAL;
	}
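	/*
	 * Worked example (illustrative assumption): 128 buffers of 64 KiB
	 * chunks need ilog2(127) + 1 = 7 id bits plus ilog2(65535) + 1 =
	 * 16 offset bits, i.e. 23 bits in total, which fits in a 28-bit
	 * immediate payload.
	 */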
	if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
		rtrs_err(sess->clt, "Incorrect sg_cnt %d, not a divisor of queue depth\n",
			 sg_cnt);
		return -EINVAL;
	}
	total_len = 0;
	for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
		const struct rtrs_sg_desc *desc = &msg->desc[sgi];
		u32 len, rkey;
		u64 addr;

		addr = le64_to_cpu(desc->addr);
		rkey = le32_to_cpu(desc->key);
		len = le32_to_cpu(desc->len);

		total_len += len;

		if (unlikely(!len || (len % sess->chunk_size))) {
			rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
				 len);
			return -EINVAL;
		}
		for ( ; len && i < sess->queue_depth; i++) {
			sess->rbufs[i].addr = addr;
			sess->rbufs[i].rkey = rkey;

			len -= sess->chunk_size;
			addr += sess->chunk_size;
		}
	}
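	/*
	 * Illustrative note (assumption): each descriptor covers a
	 * contiguous server-side region that the loop above slices into
	 * chunk_size pieces; e.g. with sg_cnt = 2 and queue_depth = 128
	 * the two descriptors together must yield exactly 128 chunks
	 * (typically 64 each), which the sanity checks below enforce.
	 */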
2331*4882a593Smuzhiyun /* Sanity check */
2332*4882a593Smuzhiyun if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
2333*4882a593Smuzhiyun rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
2334*4882a593Smuzhiyun return -EINVAL;
2335*4882a593Smuzhiyun }
2336*4882a593Smuzhiyun if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
2337*4882a593Smuzhiyun rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
2338*4882a593Smuzhiyun return -EINVAL;
2339*4882a593Smuzhiyun }
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun return 0;
2342*4882a593Smuzhiyun }

static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rtrs_clt_con *con = cq->cq_context;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_msg_info_rsp *msg;
	enum rtrs_clt_state state;
	struct rtrs_iu *iu;
	size_t rx_sz;
	int err;

	state = RTRS_CLT_CONNECTING_ERR;

	WARN_ON(con->c.cid);
	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
			 ib_wc_status_msg(wc->status));
		goto out;
	}
	WARN_ON(wc->opcode != IB_WC_RECV);

	if (unlikely(wc->byte_len < sizeof(*msg))) {
		rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
			 wc->byte_len);
		goto out;
	}
	ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
				   iu->size, DMA_FROM_DEVICE);
	msg = iu->buf;
	if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
		rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
			 le16_to_cpu(msg->type));
		goto out;
	}
	rx_sz = sizeof(*msg);
	rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
	if (unlikely(wc->byte_len < rx_sz)) {
		rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
			 wc->byte_len);
		goto out;
	}
	err = process_info_rsp(sess, msg);
	if (unlikely(err))
		goto out;

	err = post_recv_sess(sess);
	if (unlikely(err))
		goto out;

	state = RTRS_CLT_CONNECTED;

out:
	rtrs_clt_update_wc_stats(con);
	rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
	rtrs_clt_change_state(sess, state);
}
static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
{
	struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
	struct rtrs_msg_info_req *msg;
	struct rtrs_iu *tx_iu, *rx_iu;
	size_t rx_sz;
	int err;

	rx_sz = sizeof(struct rtrs_msg_info_rsp);
	rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;

	tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
			      sess->s.dev->ib_dev, DMA_TO_DEVICE,
			      rtrs_clt_info_req_done);
	rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
			      DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
	if (unlikely(!tx_iu || !rx_iu)) {
		err = -ENOMEM;
		goto out;
	}
	/* Prepare for getting info response */
	err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
	if (unlikely(err)) {
		rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
		goto out;
	}
	rx_iu = NULL;

	msg = tx_iu->buf;
	msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
	memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));

	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
				      tx_iu->size, DMA_TO_DEVICE);

	/* Send info request */
	err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
	if (unlikely(err)) {
		rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
		goto out;
	}
	tx_iu = NULL;

	/* Wait for state change */
	wait_event_interruptible_timeout(sess->state_wq,
					 sess->state != RTRS_CLT_CONNECTING,
					 msecs_to_jiffies(
						 RTRS_CONNECT_TIMEOUT_MS));
	if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
		if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
			err = -ECONNRESET;
		else
			err = -ETIMEDOUT;
		goto out;
	}

out:
	if (tx_iu)
		rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
	if (rx_iu)
		rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
	if (unlikely(err))
		/*
		 * Only reached on a local failure (e.g. allocation or post
		 * error above), i.e. the async completion path was never
		 * taken, so set the error state here ourselves.
		 */
		rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);

	return err;
}

/**
 * init_sess() - establishes all session connections and does handshake
 * @sess: client session.
 *
 * On error the full close or reconnect procedure must be taken, because
 * the async reconnect or close works may already have been started.
 */
static int init_sess(struct rtrs_clt_sess *sess)
{
	int err;

	mutex_lock(&sess->init_mutex);
	err = init_conns(sess);
	if (err) {
		rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
		goto out;
	}
	err = rtrs_send_sess_info(sess);
	if (err) {
		rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
		goto out;
	}
	rtrs_clt_sess_up(sess);
out:
	mutex_unlock(&sess->init_mutex);

	return err;
}

static void rtrs_clt_reconnect_work(struct work_struct *work)
{
	struct rtrs_clt_sess *sess;
	struct rtrs_clt *clt;
	unsigned int delay_ms;
	int err;

	sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
			    reconnect_dwork);
	clt = sess->clt;

	if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
		return;

	if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
		/* Close a session completely if max attempts is reached */
		rtrs_clt_close_conns(sess, false);
		return;
	}
	sess->reconnect_attempts++;

	/* Stop everything */
	rtrs_clt_stop_and_destroy_conns(sess);
	msleep(RTRS_RECONNECT_BACKOFF);
	if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
		err = init_sess(sess);
		if (err)
			goto reconnect_again;
	}

	return;

reconnect_again:
	if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
		sess->stats->reconnects.fail_cnt++;
		delay_ms = clt->reconnect_delay_sec * 1000;
		/* Add 0..RTRS_RECONNECT_SEED-1 seconds of random jitter */
		queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
				   msecs_to_jiffies(delay_ms +
						    prandom_u32() %
						    RTRS_RECONNECT_SEED * 1000));
	}
}

static void rtrs_clt_dev_release(struct device *dev)
{
	struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);

	mutex_destroy(&clt->paths_ev_mutex);
	mutex_destroy(&clt->paths_mutex);
	kfree(clt);
}

static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
				  u16 port, size_t pdu_sz, void *priv,
				  void (*link_ev)(void *priv,
						  enum rtrs_clt_link_ev ev),
				  unsigned int max_segments,
				  size_t max_segment_size,
				  unsigned int reconnect_delay_sec,
				  unsigned int max_reconnect_attempts)
{
	struct rtrs_clt *clt;
	int err;

	if (!paths_num || paths_num > MAX_PATHS_NUM)
		return ERR_PTR(-EINVAL);

	if (strlen(sessname) >= sizeof(clt->sessname))
		return ERR_PTR(-EINVAL);

	clt = kzalloc(sizeof(*clt), GFP_KERNEL);
	if (!clt)
		return ERR_PTR(-ENOMEM);

	clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
	if (!clt->pcpu_path) {
		kfree(clt);
		return ERR_PTR(-ENOMEM);
	}

	clt->dev.class = rtrs_clt_dev_class;
	clt->dev.release = rtrs_clt_dev_release;
	uuid_gen(&clt->paths_uuid);
	INIT_LIST_HEAD_RCU(&clt->paths_list);
	clt->paths_num = paths_num;
	clt->paths_up = MAX_PATHS_NUM;
	clt->port = port;
	clt->pdu_sz = pdu_sz;
	clt->max_segments = max_segments;
	clt->max_segment_size = max_segment_size;
	clt->reconnect_delay_sec = reconnect_delay_sec;
	clt->max_reconnect_attempts = max_reconnect_attempts;
	clt->priv = priv;
	clt->link_ev = link_ev;
	clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
	strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
	init_waitqueue_head(&clt->permits_wait);
	mutex_init(&clt->paths_ev_mutex);
	mutex_init(&clt->paths_mutex);
	device_initialize(&clt->dev);

	err = dev_set_name(&clt->dev, "%s", sessname);
	if (err)
		goto err_put;

	/*
	 * Suppress user space notification until
	 * sysfs files are created
	 */
	dev_set_uevent_suppress(&clt->dev, true);
	err = device_add(&clt->dev);
	if (err)
		goto err_put;

	clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
	if (!clt->kobj_paths) {
		err = -ENOMEM;
		goto err_del;
	}
	err = rtrs_clt_create_sysfs_root_files(clt);
	if (err) {
		kobject_del(clt->kobj_paths);
		kobject_put(clt->kobj_paths);
		goto err_del;
	}
	dev_set_uevent_suppress(&clt->dev, false);
	kobject_uevent(&clt->dev.kobj, KOBJ_ADD);

	return clt;
err_del:
	device_del(&clt->dev);
err_put:
	free_percpu(clt->pcpu_path);
	put_device(&clt->dev);
	return ERR_PTR(err);
}

static void free_clt(struct rtrs_clt *clt)
{
	free_percpu(clt->pcpu_path);

	/*
	 * release callback will free clt and destroy mutexes in last put
	 */
	device_unregister(&clt->dev);
}

/**
 * rtrs_clt_open() - Open a session to an RTRS server
 * @ops: holds the link event callback and the private pointer.
 * @sessname: name of the session
 * @paths: Paths to be established defined by their src and dst addresses
 * @paths_num: Number of elements in the @paths array
 * @port: port to be used by the RTRS session
 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
 * @reconnect_delay_sec: time between reconnect tries
 * @max_segments: Max. number of segments per IO request
 * @max_segment_size: Max. size of one segment
 * @max_reconnect_attempts: Number of times to reconnect on error before giving
 *			    up, 0 for disabled, -1 for forever
 *
 * Starts session establishment with the rtrs_server. The function can block
 * up to ~2000ms before it returns.
 *
 * Return a valid pointer on success otherwise PTR_ERR.
 */
struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
			       const char *sessname,
			       const struct rtrs_addr *paths,
			       size_t paths_num, u16 port,
			       size_t pdu_sz, u8 reconnect_delay_sec,
			       u16 max_segments,
			       size_t max_segment_size,
			       s16 max_reconnect_attempts)
{
	struct rtrs_clt_sess *sess, *tmp;
	struct rtrs_clt *clt;
	int err, i;

	clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
			ops->link_ev,
			max_segments, max_segment_size, reconnect_delay_sec,
			max_reconnect_attempts);
	if (IS_ERR(clt)) {
		err = PTR_ERR(clt);
		goto out;
	}
	for (i = 0; i < paths_num; i++) {
		struct rtrs_clt_sess *sess;

		sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
				  max_segments, max_segment_size);
		if (IS_ERR(sess)) {
			err = PTR_ERR(sess);
			goto close_all_sess;
		}
		if (!i)
			sess->for_new_clt = 1;
		list_add_tail_rcu(&sess->s.entry, &clt->paths_list);

		err = init_sess(sess);
		if (err) {
			list_del_rcu(&sess->s.entry);
			rtrs_clt_close_conns(sess, true);
			free_percpu(sess->stats->pcpu_stats);
			kfree(sess->stats);
			free_sess(sess);
			goto close_all_sess;
		}

		err = rtrs_clt_create_sess_files(sess);
		if (err) {
			list_del_rcu(&sess->s.entry);
			rtrs_clt_close_conns(sess, true);
			free_percpu(sess->stats->pcpu_stats);
			kfree(sess->stats);
			free_sess(sess);
			goto close_all_sess;
		}
	}
	err = alloc_permits(clt);
	if (err)
		goto close_all_sess;

	return clt;

close_all_sess:
	list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
		rtrs_clt_destroy_sess_files(sess, NULL);
		rtrs_clt_close_conns(sess, true);
		kobject_put(&sess->kobj);
	}
	rtrs_clt_destroy_sysfs_root_files(clt);
	rtrs_clt_destroy_sysfs_root_folders(clt);
	free_clt(clt);

out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(rtrs_clt_open);
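
/*
 * Minimal usage sketch for rtrs_clt_open() (illustrative only; all my_*
 * names and the concrete parameter values below are hypothetical, not
 * part of this file):
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 *	{
 *		// react to link up/down events for the session
 *	}
 *
 *	struct rtrs_clt_ops ops = {
 *		.priv	 = my_priv,
 *		.link_ev = my_link_ev,
 *	};
 *	struct rtrs_clt *clt;
 *
 *	clt = rtrs_clt_open(&ops, "my_sess", paths, paths_num, my_port,
 *			    sizeof(struct my_pdu), 5, 31, SZ_4K, 3);
 *	if (IS_ERR(clt))
 *		return PTR_ERR(clt);
 */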

/**
 * rtrs_clt_close() - Close a session
 * @clt: Session handle. Session is freed upon return.
 */
void rtrs_clt_close(struct rtrs_clt *clt)
{
	struct rtrs_clt_sess *sess, *tmp;

	/* First, forbid sysfs access */
	rtrs_clt_destroy_sysfs_root_files(clt);
	rtrs_clt_destroy_sysfs_root_folders(clt);

	/* Now it is safe to iterate over all paths without locks */
	list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
		rtrs_clt_close_conns(sess, true);
		rtrs_clt_destroy_sess_files(sess, NULL);
		kobject_put(&sess->kobj);
	}
	free_permits(clt);
	free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);

int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
{
	enum rtrs_clt_state old_state;
	int err = -EBUSY;
	bool changed;

	changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
						&old_state);
	if (changed) {
		sess->reconnect_attempts = 0;
		queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
	}
	if (changed || old_state == RTRS_CLT_RECONNECTING) {
		/*
		 * flush_delayed_work() queues pending work for immediate
		 * execution, so do the flush if we have queued something
		 * right now or work is pending.
		 */
		flush_delayed_work(&sess->reconnect_dwork);
		err = (READ_ONCE(sess->state) ==
		       RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
	}

	return err;
}

int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
{
	rtrs_clt_close_conns(sess, true);

	return 0;
}

int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
				    const struct attribute *sysfs_self)
{
	enum rtrs_clt_state old_state;
	bool changed;

	/*
	 * Continue stopping the path until its state was either changed
	 * to DEAD or observed as DEAD:
	 * 1. State was changed to DEAD - we were fast and nobody invoked
	 *    rtrs_clt_reconnect(), which can again start reconnecting.
	 * 2. State was observed as DEAD - someone else is removing the
	 *    path in parallel.
	 */
	do {
		rtrs_clt_close_conns(sess, true);
		changed = rtrs_clt_change_state_get_old(sess,
							RTRS_CLT_DEAD,
							&old_state);
	} while (!changed && old_state != RTRS_CLT_DEAD);

	if (likely(changed)) {
		rtrs_clt_remove_path_from_arr(sess);
		rtrs_clt_destroy_sess_files(sess, sysfs_self);
		kobject_put(&sess->kobj);
	}

	return 0;
}

void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
{
	clt->max_reconnect_attempts = (unsigned int)value;
}

int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
{
	return (int)clt->max_reconnect_attempts;
}

/**
 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
 *
 * @dir: READ/WRITE
 * @ops: callback function to be called as confirmation, and the pointer.
 * @clt: Session
 * @permit: Preallocated permit
 * @vec: Message that is sent to server together with the request.
 *	 Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
 *	 Since the msg is copied internally it can be allocated on stack.
 * @nr: Number of elements in @vec.
 * @data_len: length of data sent to/from server
 * @sg: Pages to be sent/received to/from server.
 * @sg_cnt: Number of elements in the @sg
 *
 * Return:
 * 0: Success
 * <0: Error
 *
 * On dir=READ rtrs client will request a data transfer from Server to client.
 * The data that the server will respond with will be stored in @sg when
 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
 * On dir=WRITE rtrs client will rdma write data in sg to server side.
 */
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
		     struct rtrs_clt *clt, struct rtrs_permit *permit,
		     const struct kvec *vec, size_t nr, size_t data_len,
		     struct scatterlist *sg, unsigned int sg_cnt)
{
	struct rtrs_clt_io_req *req;
	struct rtrs_clt_sess *sess;

	enum dma_data_direction dma_dir;
	int err = -ECONNABORTED, i;
	size_t usr_len, hdr_len;
	struct path_it it;

	/* Get kvec length */
	for (i = 0, usr_len = 0; i < nr; i++)
		usr_len += vec[i].iov_len;

	if (dir == READ) {
		hdr_len = sizeof(struct rtrs_msg_rdma_read) +
			  sg_cnt * sizeof(struct rtrs_sg_desc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		hdr_len = sizeof(struct rtrs_msg_rdma_write);
		dma_dir = DMA_TO_DEVICE;
	}

	rcu_read_lock();
	for (path_it_init(&it, clt);
	     (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
			continue;

		if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
			rtrs_wrn_rl(sess->clt,
				    "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
				    dir == READ ? "Read" : "Write",
				    usr_len, hdr_len, sess->max_hdr_size);
			err = -EMSGSIZE;
			break;
		}
		req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
				       vec, usr_len, sg, sg_cnt, data_len,
				       dma_dir);
		if (dir == READ)
			err = rtrs_clt_read_req(req);
		else
			err = rtrs_clt_write_req(req);
		if (unlikely(err)) {
			req->in_use = false;
			continue;
		}
		/* Success path */
		break;
	}
	path_it_deinit(&it);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(rtrs_clt_request);
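
/*
 * Illustrative call sketch for rtrs_clt_request() (the my_* names are
 * hypothetical; a real caller obtains @permit beforehand via the permit
 * API exported in rtrs.h and puts it again from its confirmation
 * callback):
 *
 *	static void my_io_done(void *priv, int errno)
 *	{
 *		// complete my_io, then put the permit
 *	}
 *
 *	struct rtrs_clt_req_ops req_ops = {
 *		.priv	 = my_io,
 *		.conf_fn = my_io_done,
 *	};
 *	struct kvec vec = {
 *		.iov_base = &my_msg,
 *		.iov_len  = sizeof(my_msg),
 *	};
 *
 *	err = rtrs_clt_request(READ, &req_ops, clt, permit, &vec, 1,
 *			       my_data_len, my_sgl, my_sg_cnt);
 */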

/**
 * rtrs_clt_query() - queries RTRS session attributes
 * @clt: session pointer
 * @attr: query results for session attributes.
 *
 * Returns:
 * 0 on success
 * -ECOMM no connection to the server
 */
int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
{
	if (!rtrs_clt_is_connected(clt))
		return -ECOMM;

	attr->queue_depth = clt->queue_depth;
	attr->max_io_size = clt->max_io_size;
	attr->sess_kobj = &clt->dev.kobj;
	strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));

	return 0;
}
EXPORT_SYMBOL(rtrs_clt_query);
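
/*
 * For example (sketch, hypothetical my_* name), a user typically sizes
 * its IO handling from the queried attributes once the session is up:
 *
 *	struct rtrs_attrs attr;
 *
 *	if (!rtrs_clt_query(clt, &attr))
 *		my_setup_pool(attr.queue_depth, attr.max_io_size);
 */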

int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
				    struct rtrs_addr *addr)
{
	struct rtrs_clt_sess *sess;
	int err;

	sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
			  clt->max_segment_size);
	if (IS_ERR(sess))
		return PTR_ERR(sess);

	/*
	 * It is totally safe to add a path in CONNECTING state: incoming
	 * IO will never grab it. Also it is very important to add the
	 * path before init, since init fires the LINK_CONNECTED event.
	 */
	rtrs_clt_add_path_to_arr(sess, addr);

	err = init_sess(sess);
	if (err)
		goto close_sess;

	err = rtrs_clt_create_sess_files(sess);
	if (err)
		goto close_sess;

	return 0;

close_sess:
	rtrs_clt_remove_path_from_arr(sess);
	rtrs_clt_close_conns(sess, true);
	free_percpu(sess->stats->pcpu_stats);
	kfree(sess->stats);
	free_sess(sess);

	return err;
}

static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
{
	if (!(dev->ib_dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		pr_err("Memory registrations not supported.\n");
		return -ENOTSUPP;
	}

	return 0;
}

static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
	.init = rtrs_clt_ib_dev_init
};

static int __init rtrs_client_init(void)
{
	rtrs_rdma_dev_pd_init(0, &dev_pd);

	rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
	if (IS_ERR(rtrs_clt_dev_class)) {
		pr_err("Failed to create rtrs-client dev class\n");
		return PTR_ERR(rtrs_clt_dev_class);
	}
	rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
	if (!rtrs_wq) {
		class_destroy(rtrs_clt_dev_class);
		return -ENOMEM;
	}

	return 0;
}

static void __exit rtrs_client_exit(void)
{
	destroy_workqueue(rtrs_wq);
	class_destroy(rtrs_clt_dev_class);
	rtrs_rdma_dev_pd_deinit(&dev_pd);
}

module_init(rtrs_client_init);
module_exit(rtrs_client_exit);