// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);

/* Interrupt control bits */
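/*
 * The helpers below mask/unmask individual MSI-X vectors through the PCI
 * core (pci_msi_mask_irq()/pci_msi_unmask_irq()). A sketch of the intended
 * use, assuming the shutdown/remove path elsewhere in the driver drives it
 * this way, is to quiesce all vectors before tearing the device down:
 *
 *	idxd_mask_msix_vectors(idxd);
 *	idxd_device_reset(idxd);
 */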
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

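/*
 * Descriptor backing memory is allocated with kcalloc_node()/kzalloc_node()
 * on the NUMA node of the PCI device (dev_to_node()), keeping descriptor
 * reads and writes local to the socket the accelerator sits on.
 */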
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
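/*
 * For a kernel-owned WQ this allocates one software descriptor, one hardware
 * descriptor and one completion record per WQ entry. The completion records
 * live in a single dma_alloc_coherent() block so the device can write status
 * back; desc->compl_dma below is the per-descriptor offset into that block.
 * Free descriptors are tracked with a scalable bitmap queue (sbitmap_queue).
 */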
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

fail_sbitmap_init:
	free_descs(wq);
fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

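	/*
	 * WQ disable/drain/reset commands take a WQ bitmask operand rather
	 * than a plain index: bits 15:0 carry a one-hot mask of the WQ
	 * within a 16-WQ window and bits 31:16 select the window, which is
	 * what BIT(wq->id % 16) | ((wq->id / 16) << 16) computes. (That
	 * reading follows the expression itself; consult the DSA spec for
	 * the authoritative operand format.)
	 */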
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	wq->state = IDXD_WQ_DISABLED;
}

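/*
 * The WQ portal is the MMIO window that work descriptors are written to
 * (e.g. via a 64-byte atomic store such as MOVDIR64B on a dedicated WQ).
 * The limited portal flavor is mapped here; the offset math lives in
 * idxd_get_wq_portal_full_offset().
 */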
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}

void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

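/*
 * Interrupt-driven command submission. The device accepts one command at a
 * time, so callers first sleep on cmd_waitq until IDXD_FLAG_CMD_RUNNING is
 * clear, then write the command register and block on a completion that the
 * command-done interrupt handler (elsewhere in the driver) signals through
 * idxd->cmd_done. Must be called from sleepable context.
 */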
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* Succeed if the command worked or the device was already enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* Succeed if the command worked or the device was already disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

/* Device configuration bits */
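/*
 * Each group's configuration occupies a 64-byte region starting at
 * idxd->grpcfg_offset + group->id * 64, laid out (as the register writes
 * below imply) as:
 *
 *	bytes  0-31: GRPWQCFG, four u64 words forming a 256-bit WQ bitmap
 *	bytes 32-39: GRPENGCFG, u64 engine bitmap
 *	bytes 40-43: GRPFLAGS, u32 flags (traffic classes, token limits)
 */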
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

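/*
 * The WQCFG register block size varies across hardware generations, so the
 * shadow copy is moved 32 bits at a time: WQCFG_STRIDES() is assumed (per
 * registers.h in this driver) to be the per-WQ config size divided by
 * sizeof(u32), and WQCFG_OFFSET() the MMIO offset of stride i for a WQ.
 */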
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memsetting the entire WQCFG shadow copy, read it back
	 * from the hardware after WQ reset. This preserves the sticky values
	 * that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg->mode = 1;
	wq->wqcfg->priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

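/*
 * Bandwidth tokens (read buffers, in later DSA terminology) arbitrate the
 * device's shared resources between groups: tokens_reserved pins a minimum
 * for a group, and a group with no explicit tokens_allowed value defaults
 * to being able to consume up to idxd->max_tokens. TC-A/TC-B are the
 * group's traffic class assignments.
 */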
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

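/*
 * Populate each group's 256-bit WQ bitmap from the per-WQ group
 * assignments: WQ n sets bit (n % 64) of word (n / 64). Only dedicated
 * WQs are supported here; configuration fails if a shared WQ is found or
 * if no WQ ends up configured at all.
 */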
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}

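/*
 * Write the full device configuration: WQ-to-group and engine-to-group
 * assignments are computed first, then group flags, then everything is
 * flushed to the WQCFG and GRPCFG registers. The caller must hold
 * idxd->dev_lock. A sketch of a typical bring-up sequence (assuming the
 * probe/sysfs paths elsewhere in the driver drive it in this order):
 *
 *	spin_lock_irqsave(&idxd->dev_lock, flags);
 *	rc = idxd_device_config(idxd);
 *	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 *	if (!rc)
 *		rc = idxd_device_enable(idxd);
 */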
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}