// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

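/*
 * Initialize a descriptor whose index was just taken from the wq's
 * sbitmap: clear the hardware descriptor and completion record, and
 * record the CPU the sbitmap bit was allocated on so it can be handed
 * back to the same per-CPU cache on free.
 */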
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
	struct idxd_desc *desc;

	desc = wq->descs[idx];
	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
	memset(desc->completion, 0, sizeof(struct dsa_completion_record));
	desc->cpu = cpu;
	return desc;
}

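/*
 * Allocate a descriptor from the wq. With IDXD_OP_NONBLOCK the call
 * fails with -EAGAIN when no descriptor is free; otherwise the caller
 * sleeps interruptibly on the wq's sbitmap_queue until a descriptor is
 * released or a signal arrives.
 */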
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	int cpu, idx;
	struct idxd_device *idxd = wq->idxd;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	struct sbitmap_queue *sbq;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	sbq = &wq->sbq;
	idx = sbitmap_queue_get(sbq, &cpu);
	if (idx < 0) {
		if (optype == IDXD_OP_NONBLOCK)
			return ERR_PTR(-EAGAIN);
	} else {
		return __get_desc(wq, idx, cpu);
	}

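	/*
	 * No descriptor was free and the caller may block: wait on the
	 * sbitmap queue, retrying the allocation after every wakeup. A
	 * pending signal aborts the wait with idx still negative.
	 */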
	ws = &sbq->ws[0];
	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending_state(TASK_INTERRUPTIBLE, current))
			break;
		idx = sbitmap_queue_get(sbq, &cpu);
		if (idx >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	if (idx < 0)
		return ERR_PTR(-EAGAIN);

	return __get_desc(wq, idx, cpu);
}

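/*
 * Return a descriptor to the wq by clearing its sbitmap bit, using the
 * CPU recorded at allocation time as the cache hint.
 */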
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	int cpu = desc->cpu;

	desc->cpu = -1;
	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

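/*
 * Submit a descriptor to the device through the wq's portal. Returns
 * -EIO if the device is no longer enabled.
 */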
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	int vec = desc->hw->int_handle;
	void __iomem *portal;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -EIO;

	portal = wq->dportal;
	/*
	 * The wmb() flushes writes to coherent DMA data before possibly
	 * triggering a DMA read. The wmb() is necessary even on UP because
	 * the recipient is a device.
	 */
	wmb();
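	/* Submit the 64-byte descriptor to the portal as one 512-bit write. */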
	iosubmit_cmds512(portal, desc->hw, 1);

	/*
	 * Pend the descriptor on the lockless list of the irq_entry
	 * that this descriptor was assigned to, so the completion
	 * interrupt handler can find and process it.
	 */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI)
		llist_add(&desc->llnode,
			  &idxd->irq_entries[vec].pending_llist);

	return 0;
}