/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		length >= 8*1024*1024*1024ULL;
}
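
/*
 * Illustrative note (editor's addition, derived from the check above,
 * not from the original comments): T4/T5 hardware cannot express MR
 * lengths of 8GB or more, so on a T5 adapter:
 *
 *	mr_exceeds_hw_limits(dev, 8ULL << 30);		returns nonzero
 *	mr_exceeds_hw_limits(dev, (8ULL << 30) - 1);	returns 0
 */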

static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       struct sk_buff *skb,
				       struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;

	addr &= 0x7FFFFFF;

	if (wr_waitp)
		c4iw_init_wr_wait(wr_waitp);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = __skb_put_zero(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
			(wr_waitp ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			       T5_ULP_MEMIO_ORDER_V(1) |
			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	if (wr_waitp)
		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	else
		ret = c4iw_ofld_send(rdev, skb);
	return ret;
}
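
/*
 * Sketch of the work request built above (editor's addition, inferred
 * from the structure layout rather than from original comments):
 *
 *	+-----------------------+
 *	| struct ulp_mem_io     |  ULP_TX_MEM_WRITE, addr in 32B units
 *	+-----------------------+
 *	| struct ulptx_sgl      |  one DSGL entry: {len0, addr0}
 *	+-----------------------+
 *
 * The hardware DMA-reads 'len' bytes from 'data' and writes them into
 * adapter memory at 'addr'; 'len' must be a multiple of 32 (dlen is
 * encoded as len>>5) and the buffer must stay mapped until the WR
 * completes.
 */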

static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb,
				  struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	pr_debug("addr 0x%x len %u\n", addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(wr_waitp);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
				 roundup(copy_len, T4_ULPTX_MIN_IO),
				 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = __skb_put_zero(skb, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				 FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		if (i == (num_wqe-1))
			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
						 __func__);
		else
			ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			break;
		skb = NULL;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	return ret;
}
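
/*
 * Worked example (editor's addition, illustrative numbers): an inline
 * write of len = 200 bytes is split into num_wqe = DIV_ROUND_UP(200, 96)
 * = 3 WRs carrying 96, 96 and 8 bytes of payload; the final 8 bytes are
 * zero-padded up to the 32-byte T4_ULPTX_MIN_IO unit, and 'addr'
 * advances by 3 (96 bytes / 32) per WR. Only the last WR sets
 * FW_WR_COMPL_F, so the caller sleeps once on wr_waitp for the whole
 * sequence.
 */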

static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  skb, remain ? NULL : wr_waitp);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
					     wr_waitp);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}
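
/*
 * Worked example (editor's addition, illustrative numbers, default
 * inline_threshold = 128): len = 2500 is emitted as DSGL chunks of
 * 1024, 1024 and 448 bytes (the tail rounded down to a 32-byte
 * multiple), and the remaining 4 bytes fall through to
 * _c4iw_write_mem_inline(). Only the final write carries wr_waitp, so
 * completion is signalled exactly once.
 */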

/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	if (len <= inline_threshold) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
	if (ret) {
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
	}
out:
	return ret;
}
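
/*
 * Minimal usage sketch (editor's addition, hypothetical values; the
 * callers below follow this pattern): zero 64 bytes of adapter memory
 * at byte address 'byte_addr' and wait for completion. Note that 'addr'
 * is expressed in 32-byte units, hence the >> 5:
 *
 *	c4iw_init_wr_wait(wr_waitp);
 *	ret = write_adapter_mem(rdev, byte_addr >> 5, 64, NULL, NULL,
 *				wr_waitp);
 */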

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 * pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
	int err;
	struct fw_ri_tpte *tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
	if (!tpt)
		return -ENOMEM;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			kfree(tpt);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
		 stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(tpt, 0, sizeof(*tpt));
	else {
		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			FW_RI_TPTE_PS_V(page_size));
		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(*tpt), tpt, skb, wr_waitp);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	kfree(tpt);
	return err;
}
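
/*
 * Illustrative note on the STAG layout built above (editor's addition,
 * derived from the code, not from an original comment): the top 24 bits
 * hold the TPT index and the low 8 bits a rolling key, so with
 * stag_idx = 0x42 and key = 0x07:
 *
 *	stag = (0x42 << 8) | 0x07;	== 0x4207
 *	mmid = stag >> 8;		== 0x42, the xarray index used by
 *					   finish_mem_reg() and the lookups
 */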

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
	int err;

	pr_debug("*pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		 pbl_addr, rdev->lldi.vr->pbl.start,
		 pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
				wr_waitp);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb,
		     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb, wr_waitp);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			   struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0, NULL, wr_waitp);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
			     struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0, skb, wr_waitp);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr,
			 struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	mhp->ibmr.length = mhp->attr.len;
	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
			      mhp->wr_waitp);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
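
/*
 * Illustrative sizing note (editor's addition, not from the original
 * source): each PBL entry is a 64-bit DMA address, so alloc_pbl()
 * reserves npages << 3 bytes from the PBL pool. For a 1MB user region
 * with 4KB pages:
 *
 *	npages = 256;			1MB / 4KB
 *	pool bytes = 256 << 3;		== 2048
 */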

struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	pr_debug("ib_pd %p\n", pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL, mhp->wr_waitp);
	if (ret)
		goto err_free_skb;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err_dereg_mem;
	return &mhp->ibmr;
err_dereg_mem:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = -ENOMEM;
	struct ib_block_iter biter;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	pr_debug("ib_pd %p\n", pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		goto err_free_mhp;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb)
		goto err_free_wr_wait;

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mhp->umem))
		goto err_free_skb;

	shift = PAGE_SHIFT;

	n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err_umem_release;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl_free;
	}

	i = n = 0;

	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i,
					mhp->wr_waitp);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i,
				mhp->wr_waitp);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl_free;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl_free;

	return &mhp->ibmr;

err_pbl_free:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_umem_release:
	ib_umem_release(mhp->umem);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(err);
}
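
/*
 * Illustrative note on the PBL write loop above (editor's addition,
 * numbers assume 4KB pages): the scratch page holds PAGE_SIZE / 8 = 512
 * entries, so a region with 1300 DMA blocks is flushed to the PBL pool
 * via write_pbl() in batches of 512, 512 and 276.
 */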

int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (ibmw->type != IB_MW_TYPE_1)
		return -EINVAL;

	php = to_c4iw_pd(ibmw->pd);
	rhp = php->rhp;
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		return -ENOMEM;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto free_wr_wait;
	}

	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
	if (ret)
		goto free_skb;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	ibmw->rkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto dealloc_win;
	}
	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return 0;

dealloc_win:
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
free_skb:
	kfree_skb(mhp->dereg_skb);
free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
	return ret;
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
	kfree_skb(mhp->dereg_skb);
	c4iw_put_wr_wait(mhp->wr_waitp);
	return 0;
}

struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err_free_dma;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
			    mhp->wr_waitp);
	if (ret)
		goto err_free_pbl;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_dereg;
	}

	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmr);
err_dereg:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_free_dma:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
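
/*
 * Usage sketch (editor's addition; hypothetical ULP caller using the
 * standard verbs API): the core reaches c4iw_map_mr_sg() through
 * ib_map_mr_sg(), which invokes c4iw_set_page() once per page and fills
 * mhp->mpl for the subsequent fastreg WR:
 *
 *	n = ib_map_mr_sg(ibmr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		...	the MR could not cover the whole scatterlist
 */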

int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	pr_debug("ib_mr %p\n", ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	ib_umem_release(mhp->umem);
	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
	c4iw_put_wr_wait(mhp->wr_waitp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}