/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: .01 */
	20,	/* 02: .02 */
	30,	/* 03: .03 */
	40,	/* 04: .04 */
	60,	/* 05: .06 */
	80,	/* 06: .08 */
	120,	/* 07: .12 */
	160,	/* 08: .16 */
	240,	/* 09: .24 */
	320,	/* 0A: .32 */
	480,	/* 0B: .48 */
	640,	/* 0C: .64 */
	960,	/* 0D: .96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};

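/*
 * Illustrative use (a sketch, not the only caller): the RNR timer code
 * later in this file indexes the table with the 5-bit timeout code from
 * the AETH, using the shift/mask helpers from rdma/ib_hdrs.h included
 * above, e.g.:
 *
 *	to = ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
 *			      IB_AETH_CREDIT_MASK];
 */
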
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

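/*
 * A minimal sketch of how this table is consumed (assuming a caller
 * already holding the appropriate QP locks): posting work is gated on
 * the current QP state, e.g. in the post-send path:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */
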
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy. Add a __user cast
	 * to quiet sparse. The src argument is already in the kernel so
	 * there are no security issues. The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}

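/*
 * Note: this helper is only meant for the adaptive SGE copy path.
 * Bypassing the cache is presumed to be a win when the working set is
 * larger than the LLC, where cached copies would mostly evict lines
 * that are still useful.
 */
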
void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size. LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Advance the clean counter. When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking. Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information. Since this is only a heuristic, this is
 * OK. Any inaccuracies will clean themselves out as the counter
 * advances. That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero. When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period. This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance. Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry. The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}

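/*
 * Sketch of the adaptive copy decision these helpers feed (the actual
 * policy lives in the SGE copy routine later in this file): track each
 * destination page, then switch to the cacheless copy once the
 * estimated working set no longer fits the configured share of the
 * LLC:
 *
 *	wss_insert(wss, sge->vaddr);
 *	if (wss_exceeds_threshold(wss))
 *		cacheless_memcpy(sge->vaddr, data, len);
 *	else
 *		memcpy(sge->vaddr, data, len);
 */
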
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table. No need for two. Let's go ahead and mark the
	 * bitmaps for those. The reserved range must be *after* the range
	 * which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
	unsigned int *qp_inuse = (unsigned int *)v;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* Reset the qp and remove it from the qp hash list */
	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

	/* Increment the qp_inuse count */
	(*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned int qp_inuse = 0;

	qp_inuse += rvt_mcast_tree_empty(rdi);

	rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

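/*
 * For illustration: with RVT_BITS_PER_PAGE bits per map page, a QPN is
 * simply (page index * RVT_BITS_PER_PAGE) + bit offset; this is the
 * inverse of the map/offset split done at the top of alloc_qpn() below:
 *
 *	offset = qpn & RVT_BITS_PER_PAGE_MASK;
 *	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 */
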
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @exclude_prefix: prefix of special queue pair number being allocated
 *
 * Return: The queue pair number on success, otherwise a negative errno
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
		RVT_AIP_QPN_MAX : RVT_QPN_MAX;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= max_qpn)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >=
			 * RVT_BITS_PER_PAGE. That is OK. It gets
			 * re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If the send side should also be cleared
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_qp_swqe(qp, wqe);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: true if user data is available, false otherwise
 *
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 *
 * Return: 0 on success, otherwise -ENOMEM if memory allocation fails
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
		 struct ib_udata *udata)
{
	if (udata) {
		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
		if (!rq->wq)
			goto bail;
		/* need kwq with no buffers */
		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->wq->wq;
	} else {
		/* need kwq with buffers */
		rq->kwq =
			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->kwq->wq;
	}

	spin_lock_init(&rq->kwq->p_lock);
	spin_lock_init(&rq->kwq->c_lock);
	return 0;
bail:
	rvt_free_rq(rq);
	return -ENOMEM;
}

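/*
 * For illustration, rvt_create_qp() below sizes a non-SRQ receive
 * queue buffer from the per-WQE footprint before calling in here:
 *
 *	sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
 *		sizeof(struct rvt_rwqe);
 *	err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz, node, udata);
 */
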
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path
 * holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_acked_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			  enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
{
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	_rvt_reset_qp(rdi, qp, type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
}

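/*
 * Lock ordering here matters: r_lock (with IRQs disabled) is taken
 * outermost, then s_hlock, then s_lock; _rvt_reset_qp() drops and
 * re-acquires all three in that same order while it quiesces the QP.
 */
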
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
		qpn &= RVT_AIP_QP_SUFFIX;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
	return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
		IB_OPCODE_UC : IB_OPCODE_UD;
}

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		kfree(wqe->ud_wr.attr);
		wqe->ud_wr.attr = NULL;
	}
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
					       GFP_KERNEL, node);
		if (!wqe->ud_wr.attr) {
			free_ud_wq_attr(qp);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;
	u8 exclude_prefix = 0;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    (init_attr->create_flags &&
	     init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		fallthrough;
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;
		qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1156*4882a593Smuzhiyun /*
1157*4882a593Smuzhiyun * Driver needs to set up it's private QP structure and do any
1158*4882a593Smuzhiyun * initialization that is needed.
1159*4882a593Smuzhiyun */
1160*4882a593Smuzhiyun priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1161*4882a593Smuzhiyun if (IS_ERR(priv)) {
1162*4882a593Smuzhiyun ret = priv;
1163*4882a593Smuzhiyun goto bail_qp;
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
					   rdi->dparms.node, udata);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_driver_priv;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		err = alloc_ud_wq_attr(qp, rdi->dparms.node);
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rq_rvt;
		}

		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			exclude_prefix = RVT_AIP_QP_PREFIX;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num,
				exclude_prefix);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		if (rdi->driver_f.qp_priv_init) {
			err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rq_wq;
			}
		}
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
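	/*
	 * Illustrative (hypothetical) userspace counterpart: the provider
	 * library reads the returned offset from the create-QP response
	 * and maps the receive queue with something like
	 *
	 *	rwq = mmap(NULL, rq_size, PROT_READ | PROT_WRITE,
	 *		   MAP_SHARED, ctx->cmd_fd, resp.offset);
	 *
	 * where ctx->cmd_fd and resp.offset are assumed names. An offset
	 * of 0 means there is no kernel RWQ to map (an SRQ is in use).
	 */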
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
			if (IS_ERR(qp->ip)) {
				ret = ERR_CAST(qp->ip);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy_jiffies
	 * value is scaled by the number of RC QPs created for the device to
	 * reduce the number of timeouts occurring when there is a large
	 * number of QPs. busy_jiffies is incremented every RC QP scaling
	 * interval. The scaling interval is selected based on extensive
	 * performance evaluation of targeted workloads.
	 */
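	/*
	 * For example, the first RC_QP_SCALING_INTERVAL RC QPs add nothing,
	 * the next interval adds one jiffy to every retry timeout, and so
	 * on, since busy_jiffies = n_rc_qps / RC_QP_SCALING_INTERVAL.
	 */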
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	free_ud_wq_attr(qp);

bail_rq_rvt:
	rvt_free_rq(&qp->r_rq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if an RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.kwq) {
		u32 head;
		u32 tail;
		struct rvt_rwq *wq = NULL;
		struct rvt_krwq *kwq = NULL;

		spin_lock(&qp->r_rq.kwq->c_lock);
		/* qp->ip used to validate if there is a user buffer mmaped */
		if (qp->ip) {
			wq = qp->r_rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(wq->head);
			tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
		} else {
			kwq = qp->r_rq.kwq;
			head = kwq->head;
			tail = kwq->tail;
		}
		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
			head = 0;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		if (qp->ip)
			RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
		else
			kwq->tail = tail;
		spin_unlock(&qp->r_rq.kwq->c_lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
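
/*
 * Illustrative sketch (not part of rvt; the function name is
 * hypothetical): how a caller might drive a QP into the error state
 * under the documented locking and raise the last WQE event, mirroring
 * what rvt_modify_qp() does for the IB_QPS_ERR transition.
 */
static inline void example_force_qp_error(struct rvt_qp *qp)
{
	int lastwqe;

	/* rvt_error_qp() requires r_lock and s_lock with IRQs disabled */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (lastwqe && qp->ibqp.event_handler) {
		struct ib_event ev = {
			.device = qp->ibqp.device,
			.element.qp = &qp->ibqp,
			.event = IB_EVENT_QP_LAST_WQE_REACHED,
		};

		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}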

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

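	/* QPNs 0 and 1 are the special SMI/GSI QPs, kept in per-port slots */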
	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
			    opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
			    be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
			    opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
			    be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			_rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 * @udata: unused by the driver
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	rvt_reset_qp(rdi, qp, ibqp->qp_type);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	kvfree(qp->r_rq.kwq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	free_ud_wq_attr(qp);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_krwq *wq = qp->r_rq.kwq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
		!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
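		/* the ring is full when advancing head would land on tail */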
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				wqe->sg_list[i].addr = wr->sg_list[i].addr;
				wqe->sg_list[i].length = wr->sg_list[i].length;
				wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
			}
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_store_release(&wq->head, next);
		}
		spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
	}
	return 0;
}

/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the base struct ib_send_wr.
 * Operation-dependent flags key the atomic operation checks.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Return: a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;
	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}
	return len;
}
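
/*
 * An illustrative (driver-defined, hypothetical) post_parms entry as
 * consumed above; drivers supply a table indexed by opcode, e.g.:
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 */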

/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is this a reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * Return: 0 on success, otherwise -ENOMEM.
 */
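/*
 * Worked example: with s_size = 16, reserved_operations = 2 and an
 * otherwise empty ring (s_head == s_last), avail computes to 16, then
 * 16 - 1 - 2 = 13 usable slots, matching the max_send_wr reported by
 * rvt_query_qp().
 */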
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		reserved_used = atomic_read(&qp->s_reserved_used);
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	/* See rvt_qp_complete_swqe() */
	slast = smp_load_acquire(&qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: flag the caller uses to decide whether to kick the send engine
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   const struct ib_send_wr *wr,
			   bool *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine. This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
	log_pmtu = qp->log_pmtu;
	if (qp->allowed_ops == IB_OPCODE_UD) {
		struct rvt_ah *ah = rvt_get_swqe_ah(wqe);

		log_pmtu = ah->log_pmtu;
		rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			goto bail_inval_free_ref;
	}

	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;

	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free_ref:
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	bool call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		/*
		 * Only call do_send if there is exactly one packet, and the
		 * driver said it was ok.
		 */
		if (nreq == 1 && call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
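
/*
 * Illustrative sketch (not part of rvt; the function name is
 * hypothetical): posting a single signaled SEND through
 * rvt_post_send(). The sge is assumed to describe a buffer already
 * registered under lkey.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 addr,
					u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr = addr,
		.length = length,
		.lkey = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	const struct ib_send_wr *bad_wr;

	/* a lone WR on an empty SQ lets rvt kick the send engine directly */
	return rvt_post_send(qp, &wr, &bad_wr);
}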

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_krwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
		wq = srq->rq.kwq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++) {
			wqe->sg_list[i].addr = wr->sg_list[i].addr;
			wqe->sg_list[i].length = wr->sg_list[i].length;
			wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
		}
		/* Make sure queue entry is written before the head index. */
		smp_store_release(&wq->head, next);
		spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
	}
	return 0;
}

/*
 * rvt used the internal kernel struct as part of its ABI, for now make sure
 * the kernel struct does not change layout. FIXME: rvt should never cast the
 * user struct to a kernel struct.
 */
static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
{
	BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
		     offsetof(struct rvt_wqe_sge, addr));
	BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
		     offsetof(struct rvt_wqe_sge, length));
	BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
		     offsetof(struct rvt_wqe_sge, lkey));
	return (struct ib_sge *)sge;
}

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, rvt_cast_sge(&wqe->sg_list[i]),
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
2366*4882a593Smuzhiyun
/**
 * get_rvt_head - get the head index of the circular buffer
 * @rq: data structure for request queue entry
 * @ip: the mmap info pointer; non-NULL when the queue is user-mapped
 *
 * Return - head index value
 */
static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
{
	u32 head;

	if (ip)
		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
	else
		head = rq->kwq->head;

	return head;
}

/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_krwq *kwq = NULL;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 head;
	int ret;
	void *ip = NULL;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
		ip = srq->ip;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
		ip = qp->ip;
	}

	spin_lock_irqsave(&rq->kwq->c_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}
	kwq = rq->kwq;
	if (ip) {
		wq = rq->wq;
		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
	} else {
		tail = kwq->tail;
	}

	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;

	if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
		head = get_rvt_head(rq, ip);
		kwq->count = rvt_get_rq_count(rq, head, tail);
	}
	if (unlikely(kwq->count == 0)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after the count is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	if (ip)
		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
	else
		kwq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	kwq->count--;
	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		if (kwq->count < srq->limit) {
			kwq->count =
				rvt_get_rq_count(rq,
						 get_rvt_head(rq, ip), tail);
			if (kwq->count < srq->limit) {
				struct ib_event ev;

				srq->limit = 0;
				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
				ev.device = qp->ibqp.device;
				ev.element.srq = qp->ibqp.srq;
				ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
				handler(&ev, srq->ibsrq.srq_context);
				goto bail;
			}
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
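
/*
 * Usage sketch (editor's illustration; example_rx() is hypothetical,
 * not an rdmavt or driver symbol). The three return values map onto a
 * driver receive path as follows:
 *
 *	static int example_rx(struct rvt_qp *qp, bool wr_id_only)
 *	{
 *		int ret = rvt_get_rwqe(qp, wr_id_only);
 *
 *		if (ret < 0)	// bad lkey; a LOC_PROT_ERR CQE was queued
 *			return ret;
 *		if (!ret)	// no RWQE posted; NAK/drop and wait
 *			return -EAGAIN;
 *		// ret == 1: qp->r_wr_id (and qp->r_sge unless
 *		// wr_id_only) now describe the receive buffer.
 *		return 0;
 *	}
 */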

/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);

void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);

/*
 * rvt_rnr_tbl_to_usec - return usec from an index into ib_rvt_rnr_table
 * @index - the index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}

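/*
 * Illustration (editor's note): on the wire the 5-bit RNR timer code
 * arrives in the AETH credit field; for loopback there is no AETH, so
 * the caller synthesizes one by shifting the QP's minimum RNR timer
 * into place, exactly as rvt_ruc_loopback() does below:
 *
 *	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
 *				IB_AETH_CREDIT_SHIFT);
 *
 * rvt_aeth_to_usec() then undoes the shift, masks the code, and looks
 * up the delay in microseconds in ib_rvt_rnr_table.
 */
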
/*
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * add a retry timer on the QP
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);

/**
 * rvt_add_rnr_timer - add/start an rnr timer on the QP
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);

/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);

/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer if it is pending
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}

/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);

/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_rnr_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);

/**
 * rvt_qp_iter_init - initialize a QP iterator
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);

/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine-grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid; 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table. Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials are the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);

/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64-bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
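
/*
 * Usage sketch (editor's illustration; example_count_rts() and the
 * counter are hypothetical): the iterator lets a driver run per-QP
 * work without touching the hash table directly, e.g. counting QPs
 * in the RTS state:
 *
 *	static void example_count_rts(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_t *nr_rts = (atomic_t *)(uintptr_t)v;
 *
 *		if (qp->state == IB_QPS_RTS)
 *			atomic_inc(nr_rts);
 *	}
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&nr_rts_counter,
 *		    example_count_rts);
 *
 * Note the callback runs outside the RCU read section with a QP
 * reference held, so it may sleep.
 */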

/*
 * This should be called with s_lock and r_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	rdi = ib_to_rvt(qp->ibqp.device);

	old_last = qp->s_last;
	trace_rvt_qp_send_completion(qp, wqe, old_last);
	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
				    status);
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);

/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
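
/*
 * Usage sketch (editor's illustration; the packet fields are
 * hypothetical): a receive routine typically copies payload into the
 * SGE state filled in by rvt_get_rwqe(), dropping MR references as
 * each SGE is consumed:
 *
 *	rvt_copy_sge(qp, &qp->r_sge, packet->payload,
 *		     packet->paylen, true, false);
 *
 * Passing copy_last = true instead defers the final 8 bytes to a
 * byte-by-byte copy after the bulk copy, the write ordering user QPs
 * rely on when they poll the tail of an RDMA WRITE buffer (see the
 * rvt_is_user_qp() check in rvt_ruc_loopback() below).
 */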

static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}

/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same
 * HFI. Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	spin_lock(&sqp->r_lock);
	rvt_send_complete(sqp, wqe, send_status);
	spin_unlock(&sqp->r_lock);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			  IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	spin_lock(&sqp->r_lock);
	rvt_send_complete(sqp, wqe, send_status);
	spin_unlock(&sqp->r_lock);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe;

		spin_lock(&sqp->r_lock);
		lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
		spin_unlock(&sqp->r_lock);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);