/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "5.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
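
/*
 * Illustrative example: per-channel register blocks are 0x80 bytes apart,
 * so a channel whose reg_base is ioat_dma->reg_base + 0x100 is channel 2.
 */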

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
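
/*
 * Illustrative example: the hardware descriptor field holds the source
 * count biased by 2 (by 9 for the 16-source extended format), so a
 * 5-source xor is programmed as src_cnt_to_hw(5) == 3 and recovered
 * with src_cnt_to_sw(3) == 5.
 */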

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
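
/*
 * Illustrative arithmetic (assuming IOAT_DESC_SZ is 64 bytes, per hw.h):
 * each 512 KiB chunk holds 512K / 64 = 8192 descriptors, so a maximal
 * ring of 1 << 16 = 65536 descriptors spans 8 chunks.
 */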

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};
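
/*
 * Illustrative sketch of the ring indices (each only ever advances; it is
 * masked by the ring size on use):
 *
 *	tail <= issued <= head	(modulo 1 << alloc_order)
 *
 * head:   next slot handed to a producer,
 * issued: last slot the hardware has been told about,
 * tail:   next slot awaiting completion cleanup.
 */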

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
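
/*
 * Usage note (illustrative): dump_desc_dbg() is a statement expression
 * that evaluates to 0 and tolerates a NULL descriptor, e.g.
 *
 *	dump_desc_dbg(ioat_chan, desc);
 */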

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
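
/*
 * Worked example (illustrative): with alloc_order = 4 the ring holds 16
 * entries; head = 10 and tail = 6 give CIRC_CNT(10, 6, 16) = 4 active
 * descriptors and 16 - 4 = 12 free slots. The u16 indices may wrap
 * freely since only their masked difference matters.
 */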

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
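
/*
 * Worked example (illustrative): with xfercap_log = 20 (1 MiB max
 * transfer), len = 3 MiB + 1 yields 3 from the shift plus 1 for the
 * nonzero remainder, i.e. 4 descriptors; an exact multiple of the cap
 * adds nothing for the remainder.
 */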

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}
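
/*
 * Illustrative note: because the ring size is a power of two, the mask
 * above maps any monotonically increasing index (head, issued, tail)
 * onto a valid slot; idx = 18 in a 16-entry ring selects slot 2.
 */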

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags);

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */
409