xref: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/hns/hns_roce_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2016 Hisilicon Limited.
3*4882a593Smuzhiyun  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun  * licenses.  You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun  * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun  * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun  * OpenIB.org BSD license below:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *     Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun  *     without modification, are permitted provided that the following
13*4882a593Smuzhiyun  *     conditions are met:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      - Redistributions of source code must retain the above
16*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun  *        disclaimer.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *      - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun  *        disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun  *        provided with the distribution.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun  * SOFTWARE.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun #include <linux/acpi.h>
34*4882a593Smuzhiyun #include <linux/of_platform.h>
35*4882a593Smuzhiyun #include <linux/module.h>
36*4882a593Smuzhiyun #include <rdma/ib_addr.h>
37*4882a593Smuzhiyun #include <rdma/ib_smi.h>
38*4882a593Smuzhiyun #include <rdma/ib_user_verbs.h>
39*4882a593Smuzhiyun #include <rdma/ib_cache.h>
40*4882a593Smuzhiyun #include "hns_roce_common.h"
41*4882a593Smuzhiyun #include "hns_roce_device.h"
42*4882a593Smuzhiyun #include <rdma/hns-abi.h>
43*4882a593Smuzhiyun #include "hns_roce_hem.h"
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /**
46*4882a593Smuzhiyun  * hns_get_gid_index - Get gid index.
47*4882a593Smuzhiyun  * @hr_dev: pointer to structure hns_roce_dev.
48*4882a593Smuzhiyun  * @port:  port, value range: 0 ~ MAX
49*4882a593Smuzhiyun  * @gid_index:  gid_index, value range: 0 ~ MAX
50*4882a593Smuzhiyun  * Description:
51*4882a593Smuzhiyun  *    N ports shared gids, allocation method as follow:
 *		GID[0][0], GID[1][0],.....GID[N - 1][0],
 *		GID[0][1], GID[1][1],.....GID[N - 1][1],
54*4882a593Smuzhiyun  *		And so on
55*4882a593Smuzhiyun  */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	/* Ports share one table, interleaved: slot = index * num_ports + port */
	return hr_dev->caps.num_ports * gid_index + port;
}
60*4882a593Smuzhiyun 
/* Cache @addr for @port and program it into the hardware.
 * Returns 0 on success (including when the address is unchanged) or the
 * hardware set_mac() error code.
 */
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;

	/* Skip the hardware update when the MAC has not changed. */
	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	/* Cache the new address, then push it to the physical port. */
	memcpy(hr_dev->dev_addr[port], addr, ETH_ALEN);

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
75*4882a593Smuzhiyun 
hns_roce_add_gid(const struct ib_gid_attr * attr,void ** context)76*4882a593Smuzhiyun static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
79*4882a593Smuzhiyun 	u8 port = attr->port_num - 1;
80*4882a593Smuzhiyun 	int ret;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	if (port >= hr_dev->caps.num_ports)
83*4882a593Smuzhiyun 		return -EINVAL;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	return ret;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
hns_roce_del_gid(const struct ib_gid_attr * attr,void ** context)90*4882a593Smuzhiyun static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
93*4882a593Smuzhiyun 	struct ib_gid_attr zattr = {};
94*4882a593Smuzhiyun 	u8 port = attr->port_num - 1;
95*4882a593Smuzhiyun 	int ret;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	if (port >= hr_dev->caps.num_ports)
98*4882a593Smuzhiyun 		return -EINVAL;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 	return ret;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun 
/* React to one netdev notifier event on the given RoCE port. */
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct net_device *netdev = hr_dev->iboe.netdevs[port];
	struct device *dev = hr_dev->dev;

	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		/* Keep the port MAC in sync with the netdev's address. */
		return hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
	case NETDEV_DOWN:
		/*
		 * In v1 engine, only support all ports closed together.
		 */
		return 0;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		return 0;
	}
}
137*4882a593Smuzhiyun 
hns_roce_netdev_event(struct notifier_block * self,unsigned long event,void * ptr)138*4882a593Smuzhiyun static int hns_roce_netdev_event(struct notifier_block *self,
139*4882a593Smuzhiyun 				 unsigned long event, void *ptr)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
142*4882a593Smuzhiyun 	struct hns_roce_ib_iboe *iboe = NULL;
143*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = NULL;
144*4882a593Smuzhiyun 	int ret;
145*4882a593Smuzhiyun 	u8 port;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
148*4882a593Smuzhiyun 	iboe = &hr_dev->iboe;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	for (port = 0; port < hr_dev->caps.num_ports; port++) {
151*4882a593Smuzhiyun 		if (dev == iboe->netdevs[port]) {
152*4882a593Smuzhiyun 			ret = handle_en_event(hr_dev, port, event);
153*4882a593Smuzhiyun 			if (ret)
154*4882a593Smuzhiyun 				return NOTIFY_DONE;
155*4882a593Smuzhiyun 			break;
156*4882a593Smuzhiyun 		}
157*4882a593Smuzhiyun 	}
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	return NOTIFY_DONE;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun 
hns_roce_setup_mtu_mac(struct hns_roce_dev * hr_dev)162*4882a593Smuzhiyun static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	int ret;
165*4882a593Smuzhiyun 	u8 i;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
168*4882a593Smuzhiyun 		if (hr_dev->hw->set_mtu)
169*4882a593Smuzhiyun 			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
170*4882a593Smuzhiyun 					    hr_dev->caps.max_mtu);
171*4882a593Smuzhiyun 		ret = hns_roce_set_mac(hr_dev, i,
172*4882a593Smuzhiyun 				       hr_dev->iboe.netdevs[i]->dev_addr);
173*4882a593Smuzhiyun 		if (ret)
174*4882a593Smuzhiyun 			return ret;
175*4882a593Smuzhiyun 	}
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	return 0;
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun 
hns_roce_query_device(struct ib_device * ib_dev,struct ib_device_attr * props,struct ib_udata * uhw)180*4882a593Smuzhiyun static int hns_roce_query_device(struct ib_device *ib_dev,
181*4882a593Smuzhiyun 				 struct ib_device_attr *props,
182*4882a593Smuzhiyun 				 struct ib_udata *uhw)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	memset(props, 0, sizeof(*props));
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	props->fw_ver = hr_dev->caps.fw_ver;
189*4882a593Smuzhiyun 	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
190*4882a593Smuzhiyun 	props->max_mr_size = (u64)(~(0ULL));
191*4882a593Smuzhiyun 	props->page_size_cap = hr_dev->caps.page_size_cap;
192*4882a593Smuzhiyun 	props->vendor_id = hr_dev->vendor_id;
193*4882a593Smuzhiyun 	props->vendor_part_id = hr_dev->vendor_part_id;
194*4882a593Smuzhiyun 	props->hw_ver = hr_dev->hw_rev;
195*4882a593Smuzhiyun 	props->max_qp = hr_dev->caps.num_qps;
196*4882a593Smuzhiyun 	props->max_qp_wr = hr_dev->caps.max_wqes;
197*4882a593Smuzhiyun 	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
198*4882a593Smuzhiyun 				  IB_DEVICE_RC_RNR_NAK_GEN;
199*4882a593Smuzhiyun 	props->max_send_sge = hr_dev->caps.max_sq_sg;
200*4882a593Smuzhiyun 	props->max_recv_sge = hr_dev->caps.max_rq_sg;
201*4882a593Smuzhiyun 	props->max_sge_rd = 1;
202*4882a593Smuzhiyun 	props->max_cq = hr_dev->caps.num_cqs;
203*4882a593Smuzhiyun 	props->max_cqe = hr_dev->caps.max_cqes;
204*4882a593Smuzhiyun 	props->max_mr = hr_dev->caps.num_mtpts;
205*4882a593Smuzhiyun 	props->max_pd = hr_dev->caps.num_pds;
206*4882a593Smuzhiyun 	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
207*4882a593Smuzhiyun 	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
208*4882a593Smuzhiyun 	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
209*4882a593Smuzhiyun 			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
210*4882a593Smuzhiyun 	props->max_pkeys = 1;
211*4882a593Smuzhiyun 	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
212*4882a593Smuzhiyun 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
213*4882a593Smuzhiyun 		props->max_srq = hr_dev->caps.num_srqs;
214*4882a593Smuzhiyun 		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
215*4882a593Smuzhiyun 		props->max_srq_sge = hr_dev->caps.max_srq_sges;
216*4882a593Smuzhiyun 	}
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
219*4882a593Smuzhiyun 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
220*4882a593Smuzhiyun 		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
221*4882a593Smuzhiyun 	}
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
/* ib_device_ops::query_port - report port state, MTU and capabilities. */
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *ndev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	/* The netdev table can change under us; sample it under the lock. */
	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	ndev = hr_dev->iboe.netdevs[port];
	if (!ndev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(ndev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
272*4882a593Smuzhiyun 
/* All hns ports are RoCE, i.e. Ethernet link layer, regardless of port. */
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
278*4882a593Smuzhiyun 
/* RoCE exposes a single default partition key at index 0. */
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	if (index != 0)
		return -EINVAL;

	*pkey = PKEY_ID;
	return 0;
}
289*4882a593Smuzhiyun 
hns_roce_modify_device(struct ib_device * ib_dev,int mask,struct ib_device_modify * props)290*4882a593Smuzhiyun static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
291*4882a593Smuzhiyun 				  struct ib_device_modify *props)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	unsigned long flags;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
296*4882a593Smuzhiyun 		return -EOPNOTSUPP;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
299*4882a593Smuzhiyun 		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
300*4882a593Smuzhiyun 		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
301*4882a593Smuzhiyun 		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
302*4882a593Smuzhiyun 	}
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	return 0;
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun 
hns_roce_alloc_ucontext(struct ib_ucontext * uctx,struct ib_udata * udata)307*4882a593Smuzhiyun static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
308*4882a593Smuzhiyun 				   struct ib_udata *udata)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun 	int ret;
311*4882a593Smuzhiyun 	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
312*4882a593Smuzhiyun 	struct hns_roce_ib_alloc_ucontext_resp resp = {};
313*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	if (!hr_dev->active)
316*4882a593Smuzhiyun 		return -EAGAIN;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	resp.qp_tab_size = hr_dev->caps.num_qps;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
321*4882a593Smuzhiyun 	if (ret)
322*4882a593Smuzhiyun 		goto error_fail_uar_alloc;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
325*4882a593Smuzhiyun 		INIT_LIST_HEAD(&context->page_list);
326*4882a593Smuzhiyun 		mutex_init(&context->page_mutex);
327*4882a593Smuzhiyun 	}
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	resp.cqe_size = hr_dev->caps.cqe_sz;
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	ret = ib_copy_to_udata(udata, &resp,
332*4882a593Smuzhiyun 			       min(udata->outlen, sizeof(resp)));
333*4882a593Smuzhiyun 	if (ret)
334*4882a593Smuzhiyun 		goto error_fail_copy_to_udata;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	return 0;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun error_fail_copy_to_udata:
339*4882a593Smuzhiyun 	hns_roce_uar_free(hr_dev, &context->uar);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun error_fail_uar_alloc:
342*4882a593Smuzhiyun 	return ret;
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun 
hns_roce_dealloc_ucontext(struct ib_ucontext * ibcontext)345*4882a593Smuzhiyun static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
hns_roce_mmap(struct ib_ucontext * context,struct vm_area_struct * vma)352*4882a593Smuzhiyun static int hns_roce_mmap(struct ib_ucontext *context,
353*4882a593Smuzhiyun 			 struct vm_area_struct *vma)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	switch (vma->vm_pgoff) {
358*4882a593Smuzhiyun 	case 0:
359*4882a593Smuzhiyun 		return rdma_user_mmap_io(context, vma,
360*4882a593Smuzhiyun 					 to_hr_ucontext(context)->uar.pfn,
361*4882a593Smuzhiyun 					 PAGE_SIZE,
362*4882a593Smuzhiyun 					 pgprot_device(vma->vm_page_prot),
363*4882a593Smuzhiyun 					 NULL);
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	/* vm_pgoff: 1 -- TPTR */
366*4882a593Smuzhiyun 	case 1:
367*4882a593Smuzhiyun 		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
368*4882a593Smuzhiyun 			return -EINVAL;
369*4882a593Smuzhiyun 		/*
370*4882a593Smuzhiyun 		 * FIXME: using io_remap_pfn_range on the dma address returned
371*4882a593Smuzhiyun 		 * by dma_alloc_coherent is totally wrong.
372*4882a593Smuzhiyun 		 */
373*4882a593Smuzhiyun 		return rdma_user_mmap_io(context, vma,
374*4882a593Smuzhiyun 					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
375*4882a593Smuzhiyun 					 hr_dev->tptr_size,
376*4882a593Smuzhiyun 					 vma->vm_page_prot,
377*4882a593Smuzhiyun 					 NULL);
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	default:
380*4882a593Smuzhiyun 		return -EINVAL;
381*4882a593Smuzhiyun 	}
382*4882a593Smuzhiyun }
383*4882a593Smuzhiyun 
/* Fill the immutable port attributes from a fresh port query. */
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	/* Advertise RoCEv2 (UDP encap) only when the hardware supports it. */
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}
404*4882a593Smuzhiyun 
/* Intentionally empty: nothing device-specific to do on disassociate, but
 * providing the op lets the core zap user mappings on hot unplug.
 */
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
408*4882a593Smuzhiyun 
hns_roce_unregister_device(struct hns_roce_dev * hr_dev)409*4882a593Smuzhiyun static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
410*4882a593Smuzhiyun {
411*4882a593Smuzhiyun 	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	hr_dev->active = false;
414*4882a593Smuzhiyun 	unregister_netdevice_notifier(&iboe->nb);
415*4882a593Smuzhiyun 	ib_unregister_device(&hr_dev->ib_dev);
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun 
/* Core verbs ops shared by all hns hardware generations. */
static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

/* Optional op sets below are merged in only when the corresponding
 * HNS_ROCE_CAP_FLAG_* capability bit is present (see registration).
 */

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};
478*4882a593Smuzhiyun 
/* Populate the ib_device, merge capability-gated op sets, register with the
 * RDMA core, then program MTU/MAC and install the netdev notifier.
 * The ordering matters: the notifier must come after ib_register_device()
 * so events never race with a half-initialized device.
 */
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
	/* Baseline uverbs commands supported by every hns generation. */
	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	/* MR rereg */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}

	/* MW */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->uverbs_cmd_mask |=
					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
	}

	/* FRMR */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	/* SRQ */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	/* Hardware-specific ops first, then the common set. */
	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		/* Ports without an attached netdev are simply skipped. */
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}
584*4882a593Smuzhiyun 
/*
 * hns_roce_init_hem() - allocate the HEM (Hardware Entry Memory) tables
 * that back the hardware context entries used by the device.
 *
 * Mandatory tables (MTPT, QPC, IRRL, CQC) are always created; the others
 * are created only when the corresponding capability field or flag is
 * non-zero.  On failure the goto chain below tears the tables down in
 * exact reverse order, with the same capability guards, so only tables
 * that were actually initialized are cleaned up.
 *
 * Returns 0 on success or a negative error code from
 * hns_roce_init_hem_table().
 */
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	/* MTPT: memory translation/protection table context for MRs. */
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	/* QPC: one queue-pair context entry per QP. */
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	/*
	 * IRRL: entry size is scaled by max_qp_init_rdma, i.e. each QP gets
	 * room for its maximum number of initiator RDMA resources.
	 */
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	/* TRRL is optional: only present when trrl_entry_sz is non-zero. */
	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	/* CQC: one completion-queue context entry per CQ. */
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	/* SRQ context, only when the device advertises SRQ support. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	/* SCC context, only with the QP flow-control capability. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	/* Optional QPC timer table, gated on its entry size. */
	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	/* Optional CQC timer table, gated on its entry size. */
	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.num_cqc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	return 0;

/* Unwind in strict reverse order of the allocations above. */
err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}
720*4882a593Smuzhiyun 
/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 *
 * Return: 0 on success, otherwise a negative error code.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	/* Page-directory list is only used with record-doorbell support. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	/* UAR table must exist before a private UAR can be allocated. */
	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	/* Private UAR used by the driver itself (not handed to userspace). */
	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	/* SRQ table is optional, gated on the SRQ capability flag. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_srq_table(hr_dev);
		if (ret) {
			dev_err(dev,
				"Failed to init share receive queue table.\n");
			goto err_qp_table_free;
		}
	}

	return 0;

/* Teardown in strict reverse order of the initializations above. */
err_qp_table_free:
	hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}
805*4882a593Smuzhiyun 
check_and_get_armed_cq(struct list_head * cq_list,struct ib_cq * cq)806*4882a593Smuzhiyun static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun 	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
809*4882a593Smuzhiyun 	unsigned long flags;
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	spin_lock_irqsave(&hr_cq->lock, flags);
812*4882a593Smuzhiyun 	if (cq->comp_handler) {
813*4882a593Smuzhiyun 		if (!hr_cq->is_armed) {
814*4882a593Smuzhiyun 			hr_cq->is_armed = 1;
815*4882a593Smuzhiyun 			list_add_tail(&hr_cq->node, cq_list);
816*4882a593Smuzhiyun 		}
817*4882a593Smuzhiyun 	}
818*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hr_cq->lock, flags);
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun 
hns_roce_handle_device_err(struct hns_roce_dev * hr_dev)821*4882a593Smuzhiyun void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun 	struct hns_roce_qp *hr_qp;
824*4882a593Smuzhiyun 	struct hns_roce_cq *hr_cq;
825*4882a593Smuzhiyun 	struct list_head cq_list;
826*4882a593Smuzhiyun 	unsigned long flags_qp;
827*4882a593Smuzhiyun 	unsigned long flags;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cq_list);
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
832*4882a593Smuzhiyun 	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
833*4882a593Smuzhiyun 		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
834*4882a593Smuzhiyun 		if (hr_qp->sq.tail != hr_qp->sq.head)
835*4882a593Smuzhiyun 			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
836*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
839*4882a593Smuzhiyun 		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
840*4882a593Smuzhiyun 			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
841*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
842*4882a593Smuzhiyun 	}
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	list_for_each_entry(hr_cq, &cq_list, node)
845*4882a593Smuzhiyun 		hns_roce_cq_completion(hr_dev, hr_cq->cqn);
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
848*4882a593Smuzhiyun }
849*4882a593Smuzhiyun 
/*
 * hns_roce_init() - bring up the RoCE device end to end.
 *
 * Sequence: optional engine reset -> optional command queue init ->
 * hw profile -> mailbox command channel -> event queues -> (optional)
 * event-driven commands -> HEM tables -> HCA software tables ->
 * optional hw_init callback -> IB device registration.  The error
 * labels unwind these stages in reverse, mirroring hns_roce_exit().
 *
 * The hw->reset/cmq_init/hw_init callbacks are optional and only
 * invoked when the hardware backend provides them.
 *
 * Returns 0 on success or a negative error code from the failing stage.
 */
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}
	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	/* Query device capabilities; fills hr_dev->caps used below. */
	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	/* Mailbox command channel, initially in polling mode. */
	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	/*
	 * Switching commands to event mode is best-effort: on failure we
	 * log a warning and stay in polling mode rather than aborting.
	 */
	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_warn(dev,
				 "Cmd event  mode failed, set back to poll!\n");
			hns_roce_cmd_use_polling(hr_dev);
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	/* QP tracking list must exist before the device becomes visible. */
	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	/* De-assert the reset taken at entry; failure here is only logged. */
	if (hr_dev->hw->reset) {
		if (hr_dev->hw->reset(hr_dev, false))
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}
959*4882a593Smuzhiyun 
/*
 * hns_roce_exit() - tear the device down in exact reverse order of
 * hns_roce_init(): unregister from the IB core first so no new users
 * can arrive, then release hardware state, software tables, HEM,
 * command/EQ resources, and finally de-assert the engine reset.
 * The hw_exit/cmq_exit/reset callbacks are optional.
 */
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	/* Drop back to polling before the EQs (event delivery) go away. */
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}
979*4882a593Smuzhiyun 
/* Module metadata reported via modinfo. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");
985