// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"

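/*
 * MAX_SCF bounds the on-stack source coefficient arrays used by the
 * pqxor helpers below; requests with more sources are rejected.
 */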
#define MAX_SCF 256

/* Lookup tables for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor: the *_idx_to_desc
 * values select which descriptor holds source 'idx' (a set bit or
 * entry means the extended descriptor), and the *_idx_to_field
 * arrays give the raw field slot within that descriptor.
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
                                       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
                                        0, 1, 2, 3, 4, 5, 6 };
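/*
 * Worked example of the encoding above: xor_idx_to_desc is 0xe0
 * (binary 11100000), so "xor_idx_to_desc >> idx & 1" is 1 only for
 * sources 5-7, which therefore land in the extended descriptor;
 * likewise pq_idx_to_desc (0xf8) places pq sources 3-7 there.
 */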

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
                        dma_addr_t addr, u32 offset, int idx)
{
        struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

        raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
                       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        raw->field[pq_idx_to_field[idx]] = addr + offset;
        pq->coef[idx] = coef;
}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
                         dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
        struct ioat_pq16a_descriptor *pq16 =
                (struct ioat_pq16a_descriptor *)desc[1];
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        raw->field[pq16_idx_to_field[idx]] = addr + offset;

        if (idx < 8)
                pq->coef[idx] = coef;
        else
                pq16->coef[idx - 8] = coef;
}

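/*
 * ioat3_alloc_sed - allocate a super extended descriptor entry plus its
 * hardware buffer from the per-device dma_pool selected by @hw_pool.
 * Called from the 16-source pq path with the channel lock held, hence
 * GFP_ATOMIC.
 */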
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
{
        struct ioat_sed_ent *sed;
        gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

        sed = kmem_cache_alloc(ioat_sed_cache, flags);
        if (!sed)
                return NULL;

        sed->hw_pool = hw_pool;
        sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
                                 flags, &sed->dma);
        if (!sed->hw) {
                kmem_cache_free(ioat_sed_cache, sed);
                return NULL;
        }

        return sed;
}

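/*
 * ioat_dma_prep_memcpy_lock - prepare a memcpy operation: the copy is
 * split into ring descriptors of at most (1 << xfercap_log) bytes each;
 * only the last descriptor carries the interrupt, fence, and completion
 * write bits.
 */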
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
                          dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        dma_addr_t dst = dma_dest;
        dma_addr_t src = dma_src;
        size_t total_len = len;
        int num_descs, idx, i;

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        num_descs = ioat_xferlen_to_descs(ioat_chan, len);
        if (likely(num_descs) &&
            ioat_check_space_lock(ioat_chan, num_descs) == 0)
                idx = ioat_chan->head;
        else
                return NULL;
        i = 0;
        do {
                size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);

                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                hw = desc->hw;

                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                dump_desc_dbg(ioat_chan, desc);
        } while (++i < num_descs);

        desc->txd.flags = flags;
        desc->len = total_len;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        dump_desc_dbg(ioat_chan, desc);
        /* we leave the channel locked to ensure in order submission */

        return &desc->txd;
}

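/*
 * __ioat_prep_xor_lock - common worker for xor and xor validate. With
 * more than five sources each operation needs a base plus an extended
 * descriptor, and a trailing null descriptor orders the raid engine's
 * completion write behind the legacy engine's.
 */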
static struct dma_async_tx_descriptor *
__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
                     dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
                     size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_xor_descriptor *xor;
        struct ioat_xor_ext_descriptor *xor_ex = NULL;
        struct ioat_dma_descriptor *hw;
        int num_descs, with_ext, idx, i;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

        BUG_ON(src_cnt < 2);

        num_descs = ioat_xferlen_to_descs(ioat_chan, len);
        /* we need 2x the number of descriptors to cover greater than 5
         * sources
         */
        if (src_cnt > 5) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) &&
            ioat_check_space_lock(ioat_chan, num_descs + 1) == 0)
                idx = ioat_chan->head;
        else
                return NULL;
        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t,
                                         len, 1 << ioat_chan->xfercap_log);
                int s;

                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                xor = desc->xor;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor; xor_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
                xor_ex = ext->xor_ex;

                descs[0] = (struct ioat_raw_descriptor *) xor;
                descs[1] = (struct ioat_raw_descriptor *) xor_ex;
                for (s = 0; s < src_cnt; s++)
                        xor_set_src(descs, src[s], offset, s);
                xor->size = xfer_size;
                xor->dst_addr = dest + offset;
                xor->ctl = 0;
                xor->ctl_f.op = op;
                xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

                len -= xfer_size;
                offset += xfer_size;
                dump_desc_dbg(ioat_chan, desc);
        } while ((i += 1 + with_ext) < num_descs);

        /* last xor descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* completion descriptor carries interrupt bit */
        compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
        compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
        hw = compl_desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        dump_desc_dbg(ioat_chan, compl_desc);

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}

struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
              unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

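/*
 * Note: for validate, src[0] is handed down as the xor destination, so
 * the engine checks the remaining src_cnt - 1 sources against it and
 * reports the comparison through *result.
 */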
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                  unsigned int src_cnt, size_t len,
                  enum sum_check_flags *result, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
                                    src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
                 struct ioat_ring_ent *ext)
{
        struct device *dev = to_dev(ioat_chan);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
        struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
        int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
                desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
                pq->ctl_f.int_en, pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++)
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
        dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
                               struct ioat_ring_ent *desc)
{
        struct device *dev = to_dev(ioat_chan);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_raw_descriptor *descs[] = { (void *)pq,
                                                (void *)pq,
                                                (void *)pq };
        int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        if (desc->sed) {
                descs[1] = (void *)desc->sed->hw;
                descs[2] = (void *)desc->sed->hw + 64;
        }

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) pq->next,
                desc->txd.flags, pq->size, pq->ctl,
                pq->ctl_f.op, pq->ctl_f.int_en,
                pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++) {
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq16_get_src(descs, i),
                        pq->coef[i]);
        }
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

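/*
 * __ioat_prep_pq_lock - common worker for pq and pq validate on up to
 * eight sources. Continuation flags add implied sources (see dma_maxpq
 * in include/linux/dmaengine.h), and pre-CB3.3 hardware (cb32) needs a
 * trailing null descriptor to keep completion writes ordered.
 */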
static struct dma_async_tx_descriptor *
__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
                    const dma_addr_t *dst, const dma_addr_t *src,
                    unsigned int src_cnt, const unsigned char *scf,
                    size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        struct ioat_pq_ext_descriptor *pq_ex = NULL;
        struct ioat_dma_descriptor *hw;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
        int i, s, idx, with_ext, num_descs;
        int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;

        dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
        /* the engine requires at least two sources (we provide
         * at least 1 implied source in the DMA_PREP_CONTINUE case)
         */
        BUG_ON(src_cnt + dmaf_continue(flags) < 2);

        num_descs = ioat_xferlen_to_descs(ioat_chan, len);
        /* we need 2x the number of descriptors to cover greater than 3
         * sources (we need 1 extra source in the q-only continuation
         * case and 3 extra sources in the p+q continuation case)
         */
        if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
            (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) &&
            ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
                idx = ioat_chan->head;
        else
                return NULL;
        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t, len,
                                         1 << ioat_chan->xfercap_log);

                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                pq = desc->pq;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor; pq_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
                pq_ex = ext->pq_ex;

                descs[0] = (struct ioat_raw_descriptor *) pq;
                descs[1] = (struct ioat_raw_descriptor *) pq_ex;

                for (s = 0; s < src_cnt; s++)
                        pq_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq_set_src(descs, dst[0], offset, 0, s++);
                        pq_set_src(descs, dst[1], offset, 1, s++);
                        pq_set_src(descs, dst[1], offset, 0, s++);
                }
                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                /* we turn on descriptor write back error status */
                if (ioat_dma->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.src_cnt = src_cnt_to_hw(s);
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while ((i += 1 + with_ext) < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        dump_pq_desc_dbg(ioat_chan, desc, ext);

        if (!cb32) {
                pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                pq->ctl_f.compl_write = 1;
                compl_desc = desc;
        } else {
                /* completion descriptor carries interrupt bit */
                compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
                compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
                hw = compl_desc->hw;
                hw->ctl = 0;
                hw->ctl_f.null = 1;
                hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                hw->ctl_f.compl_write = 1;
                hw->size = NULL_DESC_BUFFER_SIZE;
                dump_desc_dbg(ioat_chan, compl_desc);
        }

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}

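/*
 * __ioat_prep_pq16_lock - 9-16 source pq: the extra source addresses
 * and coefficients live in a super extended descriptor (sed) allocated
 * per ring entry; CB3.3 hardware completes without a trailing null
 * descriptor.
 */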
static struct dma_async_tx_descriptor *
__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
                      const dma_addr_t *dst, const dma_addr_t *src,
                      unsigned int src_cnt, const unsigned char *scf,
                      size_t len, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        u32 offset = 0;
        u8 op;
        int i, s, idx, num_descs;

        /* this function is only called with 9-16 sources */
        op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

        dev_dbg(to_dev(ioat_chan), "%s\n", __func__);

        num_descs = ioat_xferlen_to_descs(ioat_chan, len);

        /*
         * 16 source pq is only available on cb3.3 and has no completion
         * write hw bug.
         */
        if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
                idx = ioat_chan->head;
        else
                return NULL;

        i = 0;

        do {
                struct ioat_raw_descriptor *descs[4];
                size_t xfer_size = min_t(size_t, len,
                                         1 << ioat_chan->xfercap_log);

                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                pq = desc->pq;

                descs[0] = (struct ioat_raw_descriptor *) pq;

                desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt - 2) >> 3);
                if (!desc->sed) {
                        dev_err(to_dev(ioat_chan),
                                "%s: no free sed entries\n", __func__);
                        return NULL;
                }

                pq->sed_addr = desc->sed->dma;
                desc->sed->parent = desc;

                descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
                descs[2] = (void *)descs[1] + 64;

                for (s = 0; s < src_cnt; s++)
                        pq16_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq16_set_src(descs, dst[0], offset, 0, s++);
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                        pq16_set_src(descs, dst[1], offset, 0, s++);
                }

                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
                /* we turn on descriptor write back error status */
                if (ioat_dma->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while (++i < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* with cb3.3 we should be able to do completion w/o a null desc */
        pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        pq->ctl_f.compl_write = 1;

        dump_pq16_desc_dbg(ioat_chan, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}

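/*
 * src_cnt_flags - effective hardware source count once the implied
 * continuation sources are added in; used to decide between the 8- and
 * 16-source pq paths.
 */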
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
        if (dmaf_p_disabled_continue(flags))
                return src_cnt + 1;
        else if (dmaf_continue(flags))
                return src_cnt + 3;
        else
                return src_cnt;
}

struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
             unsigned int src_cnt, const unsigned char *scf, size_t len,
             unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                dst[0] = dst[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                dst[1] = dst[0];

        /* handle the single source multiply case from the raid6
         * recovery path
         */
        if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
                dma_addr_t single_source[2];
                unsigned char single_source_coef[2];

                BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
                single_source[0] = src[0];
                single_source[1] = src[0];
                single_source_coef[0] = scf[0];
                single_source_coef[1] = 0;

                return src_cnt_flags(src_cnt, flags) > 8 ?
                        __ioat_prep_pq16_lock(chan, NULL, dst, single_source,
                                              2, single_source_coef, len,
                                              flags) :
                        __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
                                            single_source_coef, len, flags);

        } else {
                return src_cnt_flags(src_cnt, flags) > 8 ?
                        __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
                                              scf, len, flags) :
                        __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
                                            scf, len, flags);
        }
}

struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                 unsigned int src_cnt, const unsigned char *scf, size_t len,
                 enum sum_check_flags *pqres, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                pq[0] = pq[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                pq[1] = pq[0];

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *pqres = 0;

        return src_cnt_flags(src_cnt, flags) > 8 ?
                __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                      flags) :
                __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                    flags);
}

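/*
 * ioat_prep_pqxor - multi-source xor on the pq engine: the P result is
 * the plain xor of the sources, so zeroing the coefficients and
 * disabling Q yields an xor into dst.
 */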
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                unsigned int src_cnt, size_t len, unsigned long flags)
{
        unsigned char scf[MAX_SCF];
        dma_addr_t pq[2];
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        if (src_cnt > MAX_SCF)
                return NULL;

        memset(scf, 0, src_cnt);
        pq[0] = dst;
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = dst; /* specify valid address for disabled result */

        return src_cnt_flags(src_cnt, flags) > 8 ?
                __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                      flags) :
                __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                    flags);
}

struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
                    unsigned int src_cnt, size_t len,
                    enum sum_check_flags *result, unsigned long flags)
{
        unsigned char scf[MAX_SCF];
        dma_addr_t pq[2];
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        if (src_cnt > MAX_SCF)
                return NULL;

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        memset(scf, 0, src_cnt);
        pq[0] = src[0];
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = pq[0]; /* specify valid address for disabled result */

        return src_cnt_flags(src_cnt, flags) > 8 ?
                __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
                                      scf, len, flags) :
                __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
                                    scf, len, flags);
}

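/*
 * ioat_prep_interrupt_lock - prepare a null descriptor whose only job
 * is to generate a completion interrupt; no data is moved.
 */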
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                return NULL;

        if (ioat_check_space_lock(ioat_chan, 1) == 0)
                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
        else
                return NULL;

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;

        desc->txd.flags = flags;
        desc->len = 1;

        dump_desc_dbg(ioat_chan, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}