// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

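/* Dispatch an asynchronous hardware event to the SRQ identified by @srqn.
 * The SRQ is looked up under the xarray lock and its refcount is raised so
 * that it cannot be freed while the event callback runs.
 */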
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

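/* Translate a hns_roce event code into the matching IB event and deliver it
 * to the consumer's event handler, if one is registered.
 */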
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

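/* Post a CREATE_SRQ mailbox command to program the SRQ context held in
 * @mailbox into hardware.
 */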
static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

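/* Post a DESTROY_SRQ mailbox command. A non-NULL @mailbox is passed as the
 * output buffer for the retired context; with a NULL @mailbox the op
 * modifier is set so that hardware skips the context write-back.
 */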
static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

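/* Allocate an SRQ number and context: look up the DMA addresses of the WQE
 * and index-queue buffers, reserve an SRQN, get an SRQC table slot, register
 * the SRQ in the xarray, then ask hardware to create the context.
 */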
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
	u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_wqe = 0;
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ number, ret = %d.\n", ret);
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
		goto err_xa;
	}

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

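/* Tear down the hardware SRQ context and release the SRQN. Waits until the
 * last reference (including any in-flight event handler) has been dropped.
 */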
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

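/* Allocate the WQE buffer for the SRQ. Each WQE holds max_gs scatter/gather
 * entries, with the WQE size rounded up to a power of two.
 */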
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, udata, addr);
	if (err)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);

	return err;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

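/* Allocate the index queue, which records the WQE indexes posted to
 * hardware. Kernel consumers additionally get a bitmap to track free index
 * entries; userspace manages its own queue.
 */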
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
		return err;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			err = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return err;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

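/* Allocate the wrid array used by kernel consumers to map a completed WQE
 * index back to the caller's wr_id.
 */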
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

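/* Create an SRQ: validate the requested attributes against the device caps,
 * allocate the WQE buffer, index queue and (for kernel consumers) the wrid
 * array, then program the SRQ context into hardware.
 */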
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_srq ucmd = {};
	int ret;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
		goto err_buf_alloc;
	}

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
				  ret);
			goto err_idx_alloc;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ context, ret = %d.\n", ret);
		goto err_wrid_alloc;
	}

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_srqc_alloc;
	}

	return 0;

err_srqc_alloc:
	free_srqc(hr_dev, srq);
err_wrid_alloc:
	free_srq_wrid(srq);
err_idx_alloc:
	free_srq_idx(hr_dev, srq);
err_buf_alloc:
	free_srq_buf(hr_dev, srq);
	return ret;
}

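/* Destroy an SRQ, releasing its resources in the reverse order of creation. */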
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
	free_srq_wrid(srq);
	free_srq_buf(hr_dev, srq);
	return 0;
}

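/* Initialize the per-device SRQ table: the xarray mapping SRQN to SRQ and
 * the bitmap from which SRQ numbers are allocated.
 */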
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}