// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

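/*
 * Map the device type to its base name; other parts of the driver
 * append the device id to this so that, e.g., the first DSA device
 * can be named "dsa0".
 */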
const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

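/*
 * Allocate and request the device's MSI-X vectors. Vector 0 is the
 * "misc" interrupt for errors and administrative events; vectors
 * 1..N-1 each service one work queue completion list.
 */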
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		rc = msixcnt;
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
					  sizeof(struct msix_entry), GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSI-X entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

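/*
 * Allocate the per-device group, work queue, and engine arrays based
 * on the capabilities read from the hardware, and create the kernel
 * workqueue used for deferred device work.
 */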
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
		if (!wq->wqcfg)
			return -ENOMEM;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}

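/*
 * Decode the table offset register. Each field is expressed in units
 * of 0x100 bytes, so multiplying by 0x100 yields the byte offset of
 * the group config, WQ config, MSI-X permission, and perfmon tables
 * from the start of the device MMIO region.
 */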
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

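/*
 * Snapshot the general, group, engine, work queue, and operation
 * capability registers, and derive the driver limits (max transfer
 * size, batch size, group/engine/WQ counts) from them.
 */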
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

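/*
 * Allocate the device context and bind it to the mapped MMIO BAR.
 * The allocation is device-managed, so it is freed automatically
 * when the PCI device goes away.
 */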
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

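/*
 * Device-level probe: reset the device, read its capabilities and
 * table offsets, set up internal structures and interrupts, then
 * allocate a device id and the character device major number.
 */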
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

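/*
 * PCI probe: enable the device, map the MMIO BAR, set the DMA masks,
 * allocate the idxd context, run the device probe, and register the
 * sysfs interface before declaring the device ready for configuration.
 */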
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

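/*
 * Abort all descriptors still sitting on the interrupt entry's
 * lock-free pending list, completing them with IDXD_COMPLETE_ABORT.
 */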
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

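/*
 * Likewise abort any descriptors that had already migrated from the
 * pending list to the interrupt entry's work list.
 */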
static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

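/*
 * Quiesce the device: disable it, mask interrupts, wait for in-flight
 * interrupt handlers, abort outstanding descriptors on every WQ
 * vector, and tear down the deferred-work queue.
 */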
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}

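/*
 * Driver unbind: remove the sysfs interface, shut the device down,
 * and release its id. Everything else is device-managed and freed
 * automatically.
 */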
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

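/*
 * Module init: require MOVDIR64B (the 64-byte atomic write used to
 * submit descriptors to the device), then register the idxd bus type,
 * device driver, char device region, and finally the PCI driver,
 * unwinding in reverse order on failure.
 */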
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support write512, there's no point in
	 * enumerating the device. We cannot use it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

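/*
 * Module exit: unregister the idxd device driver, the PCI driver,
 * the char device region, and the bus type.
 */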
static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);