// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/log2.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

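/*
 * Types of user mmap mappings handed out by this driver: queue buffers
 * allocated in host memory (EFA_MMAP_DMA_PAGE), and device BAR regions
 * mapped either write-combined (EFA_MMAP_IO_WC) or non-cached
 * (EFA_MMAP_IO_NC).
 */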
enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

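/*
 * Driver-private mmap entry: wraps the core rdma_user_mmap_entry and keeps
 * the physical address to be mapped together with its EFA_MMAP_* type.
 */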
struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SEND_BYTES, "send_bytes") \
	op(EFA_SEND_WRS, "send_wrs") \
	op(EFA_RECV_BYTES, "recv_bytes") \
	op(EFA_RECV_WRS, "recv_wrs") \
	op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_CMDS_ERR, "cmds_err") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err") \
	op(EFA_MMAP_ERR, "mmap_err")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,

enum efa_hw_stats {
	EFA_DEFINE_STATS(EFA_STATS_ENUM)
};

static const char *const efa_stats_names[] = {
	EFA_DEFINE_STATS(EFA_STATS_STR)
};

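/*
 * Indirect PBLs are built out of 4KB chunks. Each chunk holds
 * EFA_PTRS_PER_CHUNK page addresses of EFA_CHUNK_PAYLOAD_PTR_SIZE bytes
 * each, with the tail of the chunk reserved for a struct
 * efa_com_ctrl_buff_info; EFA_CHUNK_USED_SIZE is therefore the number of
 * bytes of each chunk actually in use.
 */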
#define EFA_CHUNK_PAYLOAD_SHIFT 12
#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8

#define EFA_CHUNK_SHIFT 12
#define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

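/*
 * A PBL is either physically continuous (a single DMA-mapped buffer handed
 * to the device directly) or indirect (a vmalloc'ed buffer that is
 * described to the device through the chunk list above).
 */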
struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

#define EFA_DEV_CAP(dev, cap) \
	((dev)->dev_attr.device_caps & \
	 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

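/*
 * Allocate a zeroed, physically contiguous buffer and DMA-map it towards
 * the device; efa_free_mapped() is the matching teardown helper.
 */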
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
	free_pages_exact(cpu_addr, size);
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;
	props->max_pkeys = 1;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		if (EFA_DEV_CAP(dev, RDMA_READ))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		if (EFA_DEV_CAP(dev, RNR_RETRY))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;
	qp_attr->rnr_retry = result.rnr_retry;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.alloc_pd_err);
	return err;
}

int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
	return 0;
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	efa_qp_user_mmap_entries_remove(qp);

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
	}

	kfree(qp);
	return 0;
}

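/*
 * Wrap rdma_user_mmap_entry_insert(): allocate an efa_user_mmap_entry for
 * the given address/length/type and store in *offset the mmap offset that
 * userspace should pass to mmap().
 */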
static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

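/*
 * Create the mmap entries userspace needs in order to drive the QP: the SQ
 * doorbell, the LLQ descriptors in the device memory BAR and, when an RQ
 * exists, the RQ doorbell and the host-memory RQ ring. The BAR offsets in
 * resp are then masked down to their in-page offsets.
 */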
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    (resp->llq_desc_offset & ~PAGE_MASK));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.create_qp_err);
	return ERR_PTR(err);
}

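/*
 * Valid state transitions for SRD QPs, with the required and optional
 * attribute masks for each transition; this mirrors the core qp_state_table
 * that ib_modify_qp_is_ok() consults for the other QP types.
 */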
static const struct {
	int valid;
	enum ib_qp_attr_mask req_param;
	enum ib_qp_attr_mask opt_param;
} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.req_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
		[IB_QPS_RTR] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.req_param = IB_QP_SQ_PSN,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY |
				     IB_QP_RNR_RETRY,
		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
	}
};

static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
				    enum ib_qp_state next_state,
				    enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!srd_qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = srd_qp_state_table[cur_state][next_state].req_param;
	opt_param = srd_qp_state_table[cur_state][next_state].opt_param;

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
	int err;

#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
	 IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (qp->ibqp.qp_type == IB_QPT_DRIVER)
		err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
					       qp_attr_mask);
	else
		err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
					  qp_attr_mask);

	if (err) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
			1);
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
		params.cur_qp_state = cur_state;
		params.qp_state = new_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
		params.sq_psn = qp_attr->sq_psn;
	}

	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
			1);
		params.rnr_retry = qp_attr->rnr_retry;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	rdma_user_mmap_entry_remove(cq->mmap_entry);
	efa_destroy_cq_idx(dev, cq->cq_idx);
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);
	return 0;
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_remove_mmap;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_remove_mmap:
	rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);

err_out:
	atomic64_inc(&dev->stats.create_cq_err);
	return err;
}

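/*
 * Write the DMA address of each hp_shift-sized block of the umem into
 * page_list, which is expected to hold hp_cnt entries.
 */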
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

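/*
 * Build a scatterlist over the pages backing a vmalloc'ed buffer so that
 * the buffer can subsequently be DMA-mapped for the device.
 */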
static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * Create a chunk list of the DMA addresses of the physical pages of the
 * supplied scatter-gather list.
 */
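/*
 * Chunk list layout: every chunk is a 4KB buffer holding up to
 * EFA_PTRS_PER_CHUNK payload pointers, followed by a
 * struct efa_com_ctrl_buff_info recording the DMA address and length of
 * the next chunk, forming a singly linked list in device format.
 * Chunks are mapped in reverse order below so that a chunk's next
 * pointer can be written only after its successor has been mapped; the
 * device is handed just the head chunk (see efa_create_pbl()).
 */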
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map chunks to dma and fill chunks next ptrs */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	/* the chunk at index i was never mapped; unmap only its successors */
	for (i++; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

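	/*
	 * The scatterlist entries built by efa_vmalloc_buf_to_sg() are
	 * PAGE_SIZE each, while pbl_chunk_list_create() walks them in
	 * EFA_CHUNK_PAYLOAD_SIZE blocks; the payload size must therefore
	 * not exceed PAGE_SIZE, or a block could span two sg entries.
	 */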
	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
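/*
 * kvzalloc() falls back from kmalloc() to vmalloc() for allocations the
 * page allocator cannot satisfy contiguously, so the is_vmalloc_addr()
 * check below effectively selects the PBL mode: a physically contiguous
 * buffer is DMA-mapped directly (continuous mode), while a vmalloc()ed
 * buffer must be described to the device via an indirect chunk list.
 */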
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

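/*
 * Inline PBL: the page array is small enough to be embedded directly in
 * the register-MR admin command, so no separate DMA buffer is needed.
 */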
static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	int supp_access_flags;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = order_base_2(pg_sz);
	params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

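	/*
	 * A page list that fits in the admin command is passed inline;
	 * larger lists go through a PBL (continuous or indirect), which
	 * is only needed for the duration of the registration command.
	 */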
	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			goto err_unmap;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			goto err_unmap;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return &mr->ibmr;

err_unmap:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

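/*
 * Teardown order matters here: the device's copy of the MR is destroyed
 * first, and only then are the pinned user pages released.
 */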
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

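/*
 * Sets _attr_str to NULL (handshake OK) when the device does not expose
 * the attribute or when userspace acknowledged it through the matching
 * bit in its comp_mask; otherwise sets it to the attribute name so the
 * caller can report which attribute failed the handshake.
 */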
#define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
	(_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
		     NULL : #_attr)

static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
{
	struct efa_dev *dev = to_edev(ibucontext->device);
	char *attr_str;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
				EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
		goto err;

	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
				EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
				attr_str))
		goto err;

	return 0;

err:
	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
		  attr_str);
	return -EOPNOTSUPP;
}

int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_ibv_alloc_ucontext_cmd cmd = {};
	struct efa_com_alloc_uar_result result;
	int err;

	/*
	 * it's fine if the driver does not know all request fields,
	 * we will ack input fields in our response.
	 */

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for alloc_ucontext\n");
		goto err_out;
	}

	err = efa_user_comp_handshake(ibucontext, &cmd);
	if (err)
		goto err_out;

	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;
	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
	resp.min_sq_wr = dev->dev_attr.min_sq_depth;

	err = ib_copy_to_udata(udata, &resp,
			       min(sizeof(resp), udata->outlen));
	if (err)
		goto err_dealloc_uar;

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	efa_dealloc_uar(dev, ucontext->uarn);
}

void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

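	/*
	 * DMA buffers are ordinary kernel pages and are inserted into the
	 * VMA one page at a time with vm_insert_page(); device BAR ranges
	 * go through rdma_user_mmap_io() with the caching attribute
	 * (non-cached vs write-combined) chosen by the mmap flag.
	 */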
	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_stats_names,
					  ARRAY_SIZE(efa_stats_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u8 port_num, int index)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_basic_stats *bs;
	struct efa_com_stats_admin *as;
	struct efa_stats *s;
	int err;

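	/*
	 * Hardware counters are fetched with three admin commands (basic,
	 * messages and RDMA-read scopes); admin-queue and software error
	 * counters are then folded in from the driver's own atomics.
	 */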
	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	as = &dev->edev.aq.stats;
	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	s = &dev->stats;
	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_stats_names);
}

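/*
 * EFA does not expose a standard InfiniBand or Ethernet link layer to
 * the verbs core, so the port link layer is reported as unspecified.
 */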
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}