1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/pci.h>
8*4882a593Smuzhiyun #include <linux/utsname.h>
9*4882a593Smuzhiyun #include <linux/version.h>
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <rdma/ib_user_verbs.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include "efa.h"
14*4882a593Smuzhiyun
/* PCI device IDs of the Amazon EFA virtual functions this driver binds to. */
#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

/* BARs requested unconditionally at probe time: register and memory BARs. */
#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

/* Async event (AENQ) groups the driver asks the device to enable. */
#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun /* This handler will called for unknown event group or unimplemented handlers */
unimplemented_aenq_handler(void * data,struct efa_admin_aenq_entry * aenq_e)38*4882a593Smuzhiyun static void unimplemented_aenq_handler(void *data,
39*4882a593Smuzhiyun struct efa_admin_aenq_entry *aenq_e)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun struct efa_dev *dev = (struct efa_dev *)data;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun ibdev_err(&dev->ibdev,
44*4882a593Smuzhiyun "Unknown event was received or event with unimplemented handler\n");
45*4882a593Smuzhiyun }
46*4882a593Smuzhiyun
efa_keep_alive(void * data,struct efa_admin_aenq_entry * aenq_e)47*4882a593Smuzhiyun static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun struct efa_dev *dev = (struct efa_dev *)data;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun atomic64_inc(&dev->stats.keep_alive_rcvd);
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun
/*
 * AENQ dispatch table handed to the common layer; any group without an
 * explicit entry falls back to unimplemented_aenq_handler().
 */
static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
60*4882a593Smuzhiyun
efa_release_bars(struct efa_dev * dev,int bars_mask)61*4882a593Smuzhiyun static void efa_release_bars(struct efa_dev *dev, int bars_mask)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun struct pci_dev *pdev = dev->pdev;
64*4882a593Smuzhiyun int release_bars;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
67*4882a593Smuzhiyun pci_release_selected_regions(pdev, release_bars);
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
efa_intr_msix_mgmnt(int irq,void * data)70*4882a593Smuzhiyun static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun struct efa_dev *dev = data;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun efa_com_admin_q_comp_intr_handler(&dev->edev);
75*4882a593Smuzhiyun efa_com_aenq_intr_handler(&dev->edev, data);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun return IRQ_HANDLED;
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun
efa_request_mgmnt_irq(struct efa_dev * dev)80*4882a593Smuzhiyun static int efa_request_mgmnt_irq(struct efa_dev *dev)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun struct efa_irq *irq;
83*4882a593Smuzhiyun int err;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun irq = &dev->admin_irq;
86*4882a593Smuzhiyun err = request_irq(irq->vector, irq->handler, 0, irq->name,
87*4882a593Smuzhiyun irq->data);
88*4882a593Smuzhiyun if (err) {
89*4882a593Smuzhiyun dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
90*4882a593Smuzhiyun err);
91*4882a593Smuzhiyun return err;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
95*4882a593Smuzhiyun nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector);
96*4882a593Smuzhiyun irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun return 0;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun
efa_setup_mgmnt_irq(struct efa_dev * dev)101*4882a593Smuzhiyun static void efa_setup_mgmnt_irq(struct efa_dev *dev)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun u32 cpu;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
106*4882a593Smuzhiyun "efa-mgmnt@pci:%s", pci_name(dev->pdev));
107*4882a593Smuzhiyun dev->admin_irq.handler = efa_intr_msix_mgmnt;
108*4882a593Smuzhiyun dev->admin_irq.data = dev;
109*4882a593Smuzhiyun dev->admin_irq.vector =
110*4882a593Smuzhiyun pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
111*4882a593Smuzhiyun cpu = cpumask_first(cpu_online_mask);
112*4882a593Smuzhiyun dev->admin_irq.cpu = cpu;
113*4882a593Smuzhiyun cpumask_set_cpu(cpu,
114*4882a593Smuzhiyun &dev->admin_irq.affinity_hint_mask);
115*4882a593Smuzhiyun dev_info(&dev->pdev->dev, "Setup irq:0x%p vector:%d name:%s\n",
116*4882a593Smuzhiyun &dev->admin_irq,
117*4882a593Smuzhiyun dev->admin_irq.vector,
118*4882a593Smuzhiyun dev->admin_irq.name);
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
efa_free_mgmnt_irq(struct efa_dev * dev)121*4882a593Smuzhiyun static void efa_free_mgmnt_irq(struct efa_dev *dev)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun struct efa_irq *irq;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun irq = &dev->admin_irq;
126*4882a593Smuzhiyun irq_set_affinity_hint(irq->vector, NULL);
127*4882a593Smuzhiyun free_irq(irq->vector, irq->data);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
/*
 * Describe the management interrupt and request it from the kernel.
 * Returns 0 on success or a negative errno from request_irq().
 */
static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_mgmnt_irq(dev);
}
136*4882a593Smuzhiyun
/*
 * Record the doorbell BAR's bus address and length. If the device reports
 * a doorbell BAR outside EFA_BASE_BAR_MASK (i.e. not already requested in
 * efa_probe_device()), request that PCI region here first.
 * Returns 0 on success or a negative errno.
 */
static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int bars;
	int err;

	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
		/* Doorbell BAR was not part of the base BARs requested at probe */
		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);

		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
		if (err) {
			dev_err(&dev->pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}
161*4882a593Smuzhiyun
efa_release_doorbell_bar(struct efa_dev * dev)162*4882a593Smuzhiyun static void efa_release_doorbell_bar(struct efa_dev *dev)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
165*4882a593Smuzhiyun efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun
/*
 * Apply device-provided tuning hints to the common layer. A zero hint
 * leaves the corresponding default untouched.
 */
static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		/* scaled by 1000 — presumably ms -> us; confirm against device spec */
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;

	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;

	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}
184*4882a593Smuzhiyun
efa_stats_init(struct efa_dev * dev)185*4882a593Smuzhiyun static void efa_stats_init(struct efa_dev *dev)
186*4882a593Smuzhiyun {
187*4882a593Smuzhiyun atomic64_t *s = (atomic64_t *)&dev->stats;
188*4882a593Smuzhiyun int i;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
191*4882a593Smuzhiyun atomic64_set(s, 0);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
/*
 * Report host information (OS release/version, driver version, PCI BDF,
 * admin spec version) to the device via the HOST_INFO admin feature.
 * Best-effort: any failure here is ignored so probe is not disturbed.
 */
static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strlcpy(hinf->os_dist_str, utsname()->release,
		min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strlcpy(hinf->kernel_ver_str, utsname()->version,
		min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	/* In-tree driver: the module does not carry its own version number */
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	/* Return value deliberately ignored (best-effort, see above) */
	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
239*4882a593Smuzhiyun
/* Verbs dispatch table registered with the IB core via ib_set_device_ops(). */
static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_stats = efa_alloc_hw_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_ah = efa_create_ah,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};
275*4882a593Smuzhiyun
/*
 * Second-stage probe: query device attributes, map the doorbell BAR,
 * apply HW hints, enable AENQ groups, report host info and register the
 * IB device with the RDMA core.
 * Returns 0 on success or a negative errno; on failure the doorbell BAR
 * (if separately requested) is released.
 */
static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	/* Best-effort; failures here do not abort probe */
	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &pdev->dev;

	/* Userspace verbs commands this driver implements */
	dev->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ibdev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_release_doorbell_bar;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}
346*4882a593Smuzhiyun
/*
 * Reverse of efa_ib_device_add(): reset the device, unregister the IB
 * device from the RDMA core, and release the doorbell BAR.
 */
static void efa_ib_device_remove(struct efa_dev *dev)
{
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_release_doorbell_bar(dev);
}
354*4882a593Smuzhiyun
/* Free all IRQ vectors allocated by efa_enable_msix(). */
static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}
359*4882a593Smuzhiyun
efa_enable_msix(struct efa_dev * dev)360*4882a593Smuzhiyun static int efa_enable_msix(struct efa_dev *dev)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun int msix_vecs, irq_num;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun /* Reserve the max msix vectors we might need */
365*4882a593Smuzhiyun msix_vecs = EFA_NUM_MSIX_VEC;
366*4882a593Smuzhiyun dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
367*4882a593Smuzhiyun msix_vecs);
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
370*4882a593Smuzhiyun irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
371*4882a593Smuzhiyun msix_vecs, PCI_IRQ_MSIX);
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun if (irq_num < 0) {
374*4882a593Smuzhiyun dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
375*4882a593Smuzhiyun irq_num);
376*4882a593Smuzhiyun return -ENOSPC;
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun if (irq_num != msix_vecs) {
380*4882a593Smuzhiyun efa_disable_msix(dev);
381*4882a593Smuzhiyun dev_err(&dev->pdev->dev,
382*4882a593Smuzhiyun "Allocated %d MSI-X (out of %d requested)\n",
383*4882a593Smuzhiyun irq_num, msix_vecs);
384*4882a593Smuzhiyun return -ENOSPC;
385*4882a593Smuzhiyun }
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun return 0;
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun
efa_device_init(struct efa_com_dev * edev,struct pci_dev * pdev)390*4882a593Smuzhiyun static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun int dma_width;
393*4882a593Smuzhiyun int err;
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
396*4882a593Smuzhiyun if (err)
397*4882a593Smuzhiyun return err;
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun err = efa_com_validate_version(edev);
400*4882a593Smuzhiyun if (err)
401*4882a593Smuzhiyun return err;
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun dma_width = efa_com_get_dma_width(edev);
404*4882a593Smuzhiyun if (dma_width < 0) {
405*4882a593Smuzhiyun err = dma_width;
406*4882a593Smuzhiyun return err;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
410*4882a593Smuzhiyun if (err) {
411*4882a593Smuzhiyun dev_err(&pdev->dev, "pci_set_dma_mask failed %d\n", err);
412*4882a593Smuzhiyun return err;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
416*4882a593Smuzhiyun if (err) {
417*4882a593Smuzhiyun dev_err(&pdev->dev,
418*4882a593Smuzhiyun "err_pci_set_consistent_dma_mask failed %d\n",
419*4882a593Smuzhiyun err);
420*4882a593Smuzhiyun return err;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun dma_set_max_seg_size(&pdev->dev, UINT_MAX);
423*4882a593Smuzhiyun return 0;
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun
/*
 * First-stage probe: enable the PCI device, allocate the efa_dev,
 * request and map the base BARs, initialize readless MMIO, reset the
 * device, enable MSI-X, wire the management interrupt and bring up the
 * admin queue.
 * Returns the new efa_dev on success or ERR_PTR(-errno); all acquired
 * resources are unwound on failure via the goto chain below.
 */
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;

	/* Request the register and memory BARs up front */
	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		/* Timeouts may be transient; let the core retry the probe */
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_mgmnt_irq(dev);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}
523*4882a593Smuzhiyun
/*
 * Undo efa_probe_device() in reverse order: destroy the admin queue,
 * free the management interrupt, disable MSI-X, tear down readless MMIO,
 * unmap the register BAR, release the base BARs, free the IB device and
 * disable the PCI device.
 */
static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_mgmnt_irq(dev);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}
539*4882a593Smuzhiyun
/*
 * PCI probe callback: bring up the device core, then register the IB
 * device; tear down the core again if registration fails.
 */
static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err) {
		efa_remove_device(pdev);
		return err;
	}

	return 0;
}
559*4882a593Smuzhiyun
/* PCI remove callback: unregister the IB device, then tear down the core. */
static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}
567*4882a593Smuzhiyun
/* PCI driver glue; module init/exit are generated by module_pci_driver(). */
static struct pci_driver efa_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = efa_pci_tbl,
	.probe          = efa_probe,
	.remove         = efa_remove,
};

module_pci_driver(efa_pci_driver);
576