/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

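/*
 * An MR/MW key is the 32-bit MTPT index rotated left by 8 bits (e.g. index
 * 0x1 yields key 0x100); key_to_hw_index() performs the inverse rotation.
 */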
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

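/*
 * Post a CREATE_MPT mailbox command to make the MPT entry at mpt_index
 * valid in hardware.
 */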
static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_CREATE_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

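/*
 * Post a DESTROY_MPT mailbox command to invalidate an MPT entry. The mailbox
 * is optional: when it is NULL, no output buffer is passed and the op
 * modifier argument is set instead.
 */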
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

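/*
 * Allocate an MTPT index from the bitmap, derive the MR key from it, fill in
 * the software MR state, and reserve an MTPT table entry for the new MR.
 */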
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			u32 pd, u64 iova, u64 size, u32 access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long obj = 0;
	int err;

	/* Allocate a key for mr from mr_table */
	err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc bitmap for MR key, ret = %d.\n",
			  err);
		return -ENOMEM;
	}

	mr->iova = iova;			/* MR VA starting address */
	mr->size = size;			/* MR address range */
	mr->pd = pd;				/* PD number the MR belongs to */
	mr->access = access;			/* MR access permissions */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(obj);		/* MR key */

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}

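/*
 * Create the MTR that backs this MR's page buffer list (PBL). For fast MRs
 * (FRMR) only the MTT is created here, since the buffer is attached later at
 * map time; see the mtt_only flag below.
 */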
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			size_t length, struct ib_udata *udata, u64 start,
			int access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = length;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;
	buf_attr.user_access = access;
	/* fast MR's buffer is allocated before mapping, not at creation */
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
	else
		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

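/*
 * Tear down an MR: destroy the hardware MPT entry if it was enabled, then
 * release the PBL and the MR key.
 */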
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

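/*
 * Make an MR visible to hardware: fill an MPT entry in a mailbox buffer
 * (using the FRMR variant for fast MRs) and post a CREATE_MPT command.
 */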
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
					     mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	return hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				    hr_dev->caps.num_mtpts,
				    hr_dev->caps.num_mtpts - 1,
				    hr_dev->caps.reserved_mrws, 0);
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_MR;
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
			   access_flags);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

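/*
 * Rebuild the translation of a re-registered MR: drop the old PBL, build a
 * new one for the new range, then rewrite the MPT entry into the mailbox
 * buffer for the subsequent CREATE_MPT.
 */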
static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
			  u64 start, u64 length,
			  u64 virt_addr, int mr_access_flags,
			  struct hns_roce_cmd_mailbox *mailbox,
			  u32 pdn, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret;

	free_mr_pbl(hr_dev, mr);
	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
	if (ret) {
		ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
		return ret;
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
		free_mr_pbl(hr_dev, mr);
	}

	return ret;
}

int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		ret = rereg_mr_trans(ibmr, flags,
				     start, length,
				     virt_addr, mr_access_flags,
				     mailbox, pdn, udata);
		if (ret)
			goto free_cmd_mbox;
	} else {
		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
						   mr_access_flags, virt_addr,
						   length, mailbox->buf);
		if (ret)
			goto free_cmd_mbox;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);
		kfree(mr);
	}

	return ret;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate memory region key */
	length = max_num_sg * (1 << PAGE_SHIFT);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

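/* Callback for ib_sg_to_pages() to store one page address into the PBL. */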
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

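/*
 * Tear down an MW: destroy its MPT entry if it was enabled, drop the MTPT
 * table entry, and return the index to the bitmap.
 */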
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt failed (%d)\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	unsigned long index = 0;
	int ret;

	/* Allocate a key for mw from bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		return ret;

	mw->rkey = hw_index_to_key(index);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

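/*
 * Write the DMA addresses of one buffer region into the MTT entries that the
 * HEM list reserved for that region.
 */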
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  dma_addr_t *pages, struct hns_roce_buf_region *region)
{
	__le64 *mtts;
	int offset;
	int count;
	int npage;
	u64 addr;
	int end;
	int i;

	/* if hopnum is 0, the buffer cannot store BAs, so skip writing the mtt */
	if (!region->hopnum)
		return 0;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end) {
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				addr = to_hr_hw_page_addr(pages[npage]);
			else
				addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return 0;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* Because the mtr has only one root base address, when hopnum is 0
	 * the root base address equals the first buffer address; thus all
	 * allocated memory must be in a contiguous space accessed by direct
	 * mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
					  unsigned int page_shift)
{
	if (is_direct)
		return ALIGN(alloc_size, 1 << page_shift);
	else
		return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
}

/*
 * Check that the given pages occupy a contiguous address space.
 * Returns 0 on success, or the index of the first non-contiguous page.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		kfree(mtr->kmem);
		mtr->kmem = NULL;
	}
}

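/*
 * Attach backing memory to an MTR: pin a umem for userspace callers or
 * allocate kernel buffers otherwise, then record the chosen page shift and
 * total page count in the HEM config.
 */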
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr, bool is_direct,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int best_pg_shift;
	int all_pg_count = 0;
	size_t direct_size;
	size_t total_size;
	int ret;

	total_size = mtr_bufs_size(buf_attr);
	if (total_size < 1) {
		ibdev_err(ibdev, "Failed to check mtr size\n");
		return -EINVAL;
	}

	if (udata) {
		unsigned long pgsz_bitmap;
		unsigned long page_size;

		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
		if (buf_attr->fixed_page)
			pgsz_bitmap = 1 << buf_attr->page_shift;
		else
			pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);

		page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
						   user_addr);
		if (!page_size) {
			/* release the pinned umem instead of leaking it */
			ret = -EINVAL;
			goto err_alloc_mem;
		}
		best_pg_shift = order_base_2(page_size);
		all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
		ret = 0;
	} else {
		mtr->umem = NULL;
		mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
		if (!mtr->kmem) {
			ibdev_err(ibdev, "Failed to alloc kmem\n");
			return -ENOMEM;
		}
		direct_size = mtr_kmem_direct_size(is_direct, total_size,
						   buf_attr->page_shift);
		ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
					 mtr->kmem, buf_attr->page_shift);
		if (ret) {
			ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
			goto err_alloc_mem;
		}
		best_pg_shift = buf_attr->page_shift;
		all_pg_count = mtr->kmem->npages;
	}

	/* must be bigger than the minimum hardware page shift */
	if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
		ret = -EINVAL;
		ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
			  best_pg_shift, all_pg_count);
		goto err_alloc_mem;
	}

	mtr->hem_cfg.buf_pg_shift = best_pg_shift;
	mtr->hem_cfg.buf_pg_count = all_pg_count;

	return 0;
err_alloc_mem:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

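/*
 * Collect the DMA addresses of all pages backing an MTR from the umem or
 * kmem, and verify that they are contiguous when in direct mode.
 */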
static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 dma_addr_t *pages, int count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int npage;
	int err;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
					       mtr->kmem);

	if (mtr->hem_cfg.is_direct && npage > 1) {
		err = mtr_check_direct_pages(pages, npage, page_shift);
		if (err) {
			ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
				  mtr->umem ? "user" : "kernel", err);
			npage = err;
		}
	}

	return npage;
}

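/*
 * Program an MTR's translation: direct mode only records the root BA (all
 * pages are contiguous), otherwise each region is written into the MTT.
 */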
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	int err;
	int i;

	/*
	 * Only use the first page address as root ba when hopnum is 0, this
	 * is because the addresses of all pages are consecutive in this case.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
		r = &mtr->hem_cfg.region[i];
		if (r->offset + r->count > page_cnt) {
			err = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u end %u + %u, max %u.\n",
				  i, r->offset, r->count, page_cnt);
			return err;
		}

		err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
		if (err) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, err);
			return err;
		}
	}

	return 0;
}

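/*
 * Fetch up to mtt_max translation entries starting at offset into mtt_buf,
 * and report the root BA through base_addr. Returns the number of entries
 * actually copied.
 */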
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int start_index;
	int mtt_count;
	int total = 0;
	__le64 *mtts;
	int npage;
	u64 addr;
	int left;

	if (!mtt_buf || mtt_max < 1)
		goto done;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtt_buf[total] = to_hr_hw_page_addr(addr);
			else
				mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count, NULL);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

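/*
 * Convert the buffer attributes into per-region page offsets/counts in the
 * HEM config. Returns the total page count, and reports through
 * buf_page_shift the page shift to use when collecting pages.
 */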
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift)
{
	struct hns_roce_buf_region *r;
	unsigned int page_shift;
	int page_cnt = 0;
	size_t buf_size;
	int region_cnt;

	if (cfg->is_direct) {
		buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
		page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
		/*
		 * When the HEM buffer uses level-0 addressing, the page size
		 * equals the buffer size, and the page size = 4K * 2^N.
		 */
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
		if (attr->region_count > 1) {
			cfg->buf_pg_count = page_cnt;
			page_shift = HNS_HW_PAGE_SHIFT;
		} else {
			cfg->buf_pg_count = 1;
			page_shift = cfg->buf_pg_shift;
			if (buf_size != 1 << page_shift) {
				ibdev_err(&hr_dev->ib_dev,
					  "failed to check direct size %zu shift %d.\n",
					  buf_size, page_shift);
				return -EINVAL;
			}
		}
	} else {
		page_shift = cfg->buf_pg_shift;
	}

	/* convert buffer size to page index and page count */
	for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
	     region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	if (region_cnt < 1) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to check mtr region count, pages = %d.\n",
			  cfg->buf_pg_count);
		return -ENOBUFS;
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	dma_addr_t *pages = NULL;
	int all_pg_cnt;
	int get_pg_cnt;
	int ret = 0;

	/* if the mtt is disabled, all pages must be in a contiguous address range */
	cfg->is_direct = !mtr_has_mtt(buf_attr);

	/* if the buffer only needs an mtt, just init the hem cfg */
	if (buf_attr->mtt_only) {
		cfg->buf_pg_shift = buf_attr->page_shift;
		cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
				    buf_attr->page_shift;
		mtr->umem = NULL;
		mtr->kmem = NULL;
	} else {
		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
				     udata, user_addr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc mtr bufs, ret = %d.\n", ret);
			return ret;
		}
	}

	all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
	if (all_pg_cnt < 1) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
		goto err_alloc_bufs;
	}

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
				  ret);
			goto err_alloc_bufs;
		}
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	/* no buffer to map */
	if (buf_attr->mtt_only)
		return 0;

	/* alloc a tmp array to store buffer's dma address */
	pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
			  all_pg_cnt);
		goto err_alloc_hem_list;
	}

	get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
				   buf_page_shift);
	if (get_pg_cnt != all_pg_cnt) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
			  get_pg_cnt, all_pg_cnt);
		ret = -ENOBUFS;
		goto err_alloc_page_list;
	}

	/* write buffer's dma address to BA table */
	ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
	if (ret) {
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
		goto err_alloc_page_list;
	}

	/* drop tmp array */
	kvfree(pages);
	return 0;
err_alloc_page_list:
	kvfree(pages);
err_alloc_hem_list:
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
err_alloc_bufs:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}