/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
};
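
/*
 * pending_llist is the lock-free list that submitters post descriptors
 * to; the threaded interrupt handler is expected to drain it and move
 * any descriptor whose completion record has not yet been written onto
 * work_list for later processing.
 */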

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct device dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *dportal;
	struct device conf_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	struct dsa_completion_record *compls;
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};
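
/*
 * Illustrative layout of the descriptor resources above (a sketch of the
 * presumed allocation scheme, not a normative contract): a wq carries
 * num_descs software descriptors, where descs[i] pairs hw_descs[i] with
 * the i-th record in the compls DMA region, i.e. completion i lives at
 * compls_addr + i * sizeof(struct dsa_completion_record), and sbq hands
 * out free descriptor indices.
 */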

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
	struct dsa_hw_desc *hw;
	dma_addr_t desc_dma;
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
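
/*
 * Typical use of the conversion macros above (hypothetical sysfs callback,
 * shown only as a sketch): the attribute handler receives the embedded
 * conf_dev and recovers the containing object, e.g.
 *
 *	struct idxd_device *idxd = confdev_to_idxd(dev);
 *	struct idxd_wq *wq = confdev_to_wq(dev);
 */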

extern struct bus_type dsa_bus_type;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
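
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): each wq owns
 * four 4K portal pages, so for wq_id 2 the unlimited portal sits at
 * ((2 * 4) << 12) + 0 = 0x8000 and the limited portal one page later at
 * 0x9000.
 */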

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
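
/*
 * The helpers above are plain, non-atomic counter ops; the assumption in
 * this sketch is that callers serialize them with wq->wq_lock:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 *	...
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_put(wq);
 *	mutex_unlock(&wq->wq_lock);
 */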

const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
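
/*
 * Submission flow sketch (hypothetical kernel client; error handling is
 * trimmed, and the ERR_PTR return convention of idxd_alloc_desc is an
 * assumption of this example):
 *
 *	struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	... fill in desc->hw (opcode, source, destination, flags) ...
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 */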

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif