// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
        "set ioat completion timeout [msec] (default 200 [msec])");
static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
36*4882a593Smuzhiyun "set ioat idel timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)

static char *chanerr_str[] = {
        "DMA Transfer Source Address Error",
        "DMA Transfer Destination Address Error",
        "Next Descriptor Address Error",
        "Descriptor Error",
        "Chan Address Value Error",
        "CHANCMD Error",
        "Chipset Uncorrectable Data Integrity Error",
        "DMA Uncorrectable Data Integrity Error",
        "Read Data Error",
        "Write Data Error",
        "Descriptor Control Error",
        "Descriptor Transfer Size Error",
        "Completion Address Error",
        "Interrupt Configuration Error",
        "Super extended descriptor Address Error",
        "Unaffiliated Error",
        "CRC or XOR P Error",
        "XOR Q Error",
        "Descriptor Count Error",
        "DIF All F detect Error",
        "Guard Tag verification Error",
        "Application Tag verification Error",
        "Reference Tag verification Error",
        "Bundle Bit Error",
        "Result DIF All F detect Error",
        "Result Guard Tag verification Error",
        "Result Application Tag verification Error",
        "Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
                if ((chanerr >> i) & 1) {
                        dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
                                i, chanerr_str[i]);
                }
        }
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioatdma_chan *ioat_chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
                ioat_chan = ioat_chan_by_index(instance, bit);
                if (test_bit(IOAT_RUN, &ioat_chan->state))
                        tasklet_schedule(&ioat_chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioatdma_chan *ioat_chan = data;

        if (test_bit(IOAT_RUN, &ioat_chan->state))
                tasklet_schedule(&ioat_chan->cleanup_task);

        return IRQ_HANDLED;
}

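/**
 * ioat_stop - quiesce a channel and reap its remaining descriptors
 * @ioat_chan: channel to stop
 *
 * Clears IOAT_RUN so the interrupt handlers and the timer can no longer
 * schedule the cleanup tasklet, waits out any in-flight interrupt, timer
 * and tasklet activity, then runs a final cleanup pass.
 */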
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct pci_dev *pdev = ioat_dma->pdev;
        int chan_id = chan_num(ioat_chan);
        struct msix_entry *msix;

        /* 1/ stop irq from firing tasklets
         * 2/ stop the tasklet from re-arming irqs
         */
        clear_bit(IOAT_RUN, &ioat_chan->state);

        /* flush inflight interrupts */
        switch (ioat_dma->irq_mode) {
        case IOAT_MSIX:
                msix = &ioat_dma->msix_entries[chan_id];
                synchronize_irq(msix->vector);
                break;
        case IOAT_MSI:
        case IOAT_INTX:
                synchronize_irq(pdev->irq);
                break;
        default:
                break;
        }

        /* flush inflight timers */
        del_timer_sync(&ioat_chan->timer);

        /* flush inflight tasklet runs */
        tasklet_kill(&ioat_chan->cleanup_task);

        /* final cleanup now that everything is quiesced and can't re-arm */
        ioat_cleanup_event(&ioat_chan->cleanup_task);
}

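/*
 * __ioat_issue_pending - publish submitted descriptors to the hardware by
 * advancing the DMA count register to the current ring head. Caller must
 * hold prep_lock.
 */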
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
        ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
        ioat_chan->issued = ioat_chan->head;
        writew(ioat_chan->dmacount,
               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail,
                ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

        if (ioat_ring_pending(ioat_chan)) {
                spin_lock_bh(&ioat_chan->prep_lock);
                __ioat_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
        }
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held.
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
        if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
                __ioat_issue_pending(ioat_chan);
}

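/*
 * __ioat_start_null_desc - queue a NULL descriptor to (re)start the channel.
 * It transfers no data but requests an interrupt and a completion write,
 * re-establishing the chain address for subsequent work. Caller must hold
 * prep_lock.
 */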
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat_ring_space(ioat_chan) < 1) {
                dev_err(to_dev(ioat_chan),
                        "Unable to start null desc - ring full\n");
                return;
        }

        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        ioat_set_chainaddr(ioat_chan, desc->txd.phys);
        dump_desc_dbg(ioat_chan, desc);
        /* make sure descriptors are written before we submit */
        wmb();
        ioat_chan->head += 1;
        __ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
        spin_lock_bh(&ioat_chan->prep_lock);
        if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                __ioat_start_null_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);
}

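/*
 * __ioat_restart_chan - resume processing after a reset: rewind the issued
 * pointer to the tail and either re-issue the pending descriptors or, if
 * the ring is empty, kick the channel with a NULL descriptor.
 */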
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
        /* set the tail to be re-issued */
        ioat_chan->issued = ioat_chan->tail;
        ioat_chan->dmacount = 0;
        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail,
                ioat_chan->issued, ioat_chan->dmacount);

        if (ioat_ring_pending(ioat_chan)) {
                struct ioat_ring_ent *desc;

                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
                ioat_set_chainaddr(ioat_chan, desc->txd.phys);
                __ioat_issue_pending(ioat_chan);
        } else
                __ioat_start_null_desc(ioat_chan);
}

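/*
 * ioat_quiesce - suspend the channel and busy-wait until it stops executing
 * descriptors. A @tmo of 0 waits indefinitely; otherwise -ETIMEDOUT is
 * returned if the channel is still running when the timeout expires.
 */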
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;
        u32 status;

        status = ioat_chansts(ioat_chan);
        if (is_ioat_active(status) || is_ioat_idle(status))
                ioat_suspend(ioat_chan);
        while (is_ioat_active(status) || is_ioat_idle(status)) {
                if (tmo && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                status = ioat_chansts(ioat_chan);
                cpu_relax();
        }

        return err;
}

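/*
 * ioat_reset_sync - issue a channel reset and poll until the reset-pending
 * state clears or @tmo expires.
 */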
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;
        int err = 0;

        ioat_reset(ioat_chan);
        while (ioat_reset_pending(ioat_chan)) {
                if (end && time_after(jiffies, end)) {
                        err = -ETIMEDOUT;
                        break;
                }
                cpu_relax();
        }

        return err;
}

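/*
 * ioat_tx_submit_unlock - dmaengine ->tx_submit hook. Assigns a cookie,
 * arms the completion watchdog, publishes the prepared descriptors to the
 * device and advances the ring head, then drops the prep_lock taken in
 * ioat_check_space_lock().
 */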
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
        __releases(&ioat_chan->prep_lock)
{
        struct dma_chan *c = tx->chan;
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        dma_cookie_t cookie;

        cookie = dma_cookie_assign(tx);
        dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

        if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        /* make descriptor updates visible before advancing ioat->head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        wmb();

        ioat_chan->head += ioat_chan->produce;

        ioat_update_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);

        return cookie;
}

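/*
 * ioat_alloc_ring_ent - carve a hardware descriptor out of the channel's
 * pre-allocated coherent chunks and pair it with a freshly allocated
 * software ring entry.
 */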
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
        int chunk;
        dma_addr_t phys;
        u8 *pos;
        off_t offs;

        chunk = idx / IOAT_DESCS_PER_CHUNK;
        idx &= (IOAT_DESCS_PER_CHUNK - 1);
        offs = idx * IOAT_DESC_SZ;
        pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
        phys = ioat_chan->descs[chunk].hw + offs;
        hw = (struct ioat_dma_descriptor *)pos;
        memset(hw, 0, sizeof(*hw));

        desc = kmem_cache_zalloc(ioat_cache, flags);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_submit = ioat_tx_submit_unlock;
        desc->hw = hw;
        desc->txd.phys = phys;
        return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
        kmem_cache_free(ioat_cache, desc);
}

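/*
 * ioat_alloc_ring - allocate the software descriptor ring (2^order entries)
 * plus the DMA-coherent chunks backing the hardware descriptors, link the
 * hardware descriptors into a circular chain, and enable descriptor
 * prefetching on devices that advertise IOAT_CAP_DPS.
 */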
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent **ring;
        int total_descs = 1 << order;
        int i, chunks;

        /* allocate the array to hold the software ring */
        ring = kcalloc(total_descs, sizeof(*ring), flags);
        if (!ring)
                return NULL;

        chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
        ioat_chan->desc_chunks = chunks;

        for (i = 0; i < chunks; i++) {
                struct ioat_descs *descs = &ioat_chan->descs[i];

                descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
                                                 IOAT_CHUNK_SIZE, &descs->hw, flags);
                if (!descs->virt) {
                        int idx;

                        for (idx = 0; idx < i; idx++) {
                                descs = &ioat_chan->descs[idx];
                                dma_free_coherent(to_dev(ioat_chan),
                                                  IOAT_CHUNK_SIZE,
                                                  descs->virt, descs->hw);
                                descs->virt = NULL;
                                descs->hw = 0;
                        }

                        ioat_chan->desc_chunks = 0;
                        kfree(ring);
                        return NULL;
                }
        }

        for (i = 0; i < total_descs; i++) {
                ring[i] = ioat_alloc_ring_ent(c, i, flags);
                if (!ring[i]) {
                        int idx;

                        while (i--)
                                ioat_free_ring_ent(ring[i], c);

                        for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
                                dma_free_coherent(to_dev(ioat_chan),
                                                  IOAT_CHUNK_SIZE,
                                                  ioat_chan->descs[idx].virt,
                                                  ioat_chan->descs[idx].hw);
                                ioat_chan->descs[idx].virt = NULL;
                                ioat_chan->descs[idx].hw = 0;
                        }

                        ioat_chan->desc_chunks = 0;
                        kfree(ring);
                        return NULL;
                }
                set_desc_id(ring[i], i);
        }

        /* link descs */
        for (i = 0; i < total_descs-1; i++) {
                struct ioat_ring_ent *next = ring[i+1];
                struct ioat_dma_descriptor *hw = ring[i]->hw;

                hw->next = next->txd.phys;
        }
        ring[i]->hw->next = ring[0]->txd.phys;

        /* setup descriptor pre-fetching for v3.4 */
        if (ioat_dma->cap & IOAT_CAP_DPS) {
                u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

                if (chunks == 1)
                        drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

                writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
        }

        return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
        __acquires(&ioat_chan->prep_lock)
{
        spin_lock_bh(&ioat_chan->prep_lock);
        /* never allow the last descriptor to be consumed, we need at
         * least one free at all times to allow for on-the-fly ring
         * resizing.
         */
        if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
                dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat_chan->head,
                        ioat_chan->tail, ioat_chan->issued);
                ioat_chan->produce = num_descs;
                return 0;  /* with ioat->prep_lock held */
        }
        spin_unlock_bh(&ioat_chan->prep_lock);

        dev_dbg_ratelimited(to_dev(ioat_chan),
                            "%s: ring full! num_descs: %d (%x:%x:%x)\n",
                            __func__, num_descs, ioat_chan->head,
                            ioat_chan->tail, ioat_chan->issued);

        /* progress reclaim in the allocation failure case; we may be
         * called under bh_disabled so we need to trigger the timer
         * event directly
         */
        if (time_is_before_jiffies(ioat_chan->timer.expires)
            && timer_pending(&ioat_chan->timer)) {
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
                ioat_timer_event(&ioat_chan->timer);
        }

        return -ENOMEM;
}

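/*
 * desc_has_ext - true if this operation spilled into a second (extended)
 * descriptor slot: XOR ops with more than 5 sources or PQ ops with more
 * than 3 sources.
 */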
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        if (hw->ctl_f.op == IOAT_OP_XOR ||
            hw->ctl_f.op == IOAT_OP_XOR_VAL) {
                struct ioat_xor_descriptor *xor = desc->xor;

                if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
                        return true;
        } else if (hw->ctl_f.op == IOAT_OP_PQ ||
                   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
                struct ioat_pq_descriptor *pq = desc->pq;

                if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
                        return true;
        }

        return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
        if (!sed)
                return;

        dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
        kmem_cache_free(ioat_sed_cache, sed);
}

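/*
 * ioat_get_current_completion - read the channel's completion writeback
 * area and convert it to the physical address of the last descriptor the
 * hardware finished.
 */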
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
        u64 phys_complete;
        u64 completion;

        completion = *ioat_chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        return phys_complete;
}

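/*
 * ioat_cleanup_preamble - check whether the completion address has advanced
 * since the last cleanup pass. If it has, clear IOAT_COMPLETION_ACK and
 * re-arm the completion watchdog before the caller reaps descriptors.
 */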
static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
                                  u64 *phys_complete)
{
        *phys_complete = ioat_get_current_completion(ioat_chan);
        if (*phys_complete == ioat_chan->last_completion)
                return false;

        clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        return true;
}

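/*
 * desc_get_errstat - for PQ validate operations, propagate the descriptor
 * write-back error status (P/Q mismatch bits) into the caller's result.
 */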
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        switch (hw->ctl_f.op) {
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
        {
                struct ioat_pq_descriptor *pq = desc->pq;

                /* check if there's error written */
                if (!pq->dwbes_f.wbes)
                        return;

                /* need to set a chanerr var for checking to clear later */

                if (pq->dwbes_f.p_val_err)
                        *desc->result |= SUM_CHECK_P_RESULT;

                if (pq->dwbes_f.q_val_err)
                        *desc->result |= SUM_CHECK_Q_RESULT;

                return;
        }
        default:
                return;
        }
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: zeroed (or not) completion address (from status)
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        int idx = ioat_chan->tail, i;
        u16 active;

        dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

        /*
         * At restart of the channel, the completion address and the
         * channel status will be 0 due to starting a new chain. Since
         * it's new chain and the first descriptor "fails", there is
         * nothing to clean up. We do not want to reap the entire submitted
         * chain due to this 0 address value and then BUG.
         */
        if (!phys_complete)
                return;

        active = ioat_ring_active(ioat_chan);
        for (i = 0; i < active && !seen_current; i++) {
                struct dma_async_tx_descriptor *tx;

                prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                dump_desc_dbg(ioat_chan, desc);

                /* set err stat if we are using dwbes */
                if (ioat_dma->cap & IOAT_CAP_DWBES)
                        desc_get_errstat(ioat_chan, desc);

                tx = &desc->txd;
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        dmaengine_desc_get_callback_invoke(tx, NULL);
                        tx->callback = NULL;
                        tx->callback_result = NULL;
                }

                if (tx->phys == phys_complete)
                        seen_current = true;

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        BUG_ON(i + 1 >= active);
                        i++;
                }

                /* cleanup super extended descriptors */
                if (desc->sed) {
                        ioat_free_sed(ioat_dma, desc->sed);
                        desc->sed = NULL;
                }
        }

        /* finish all descriptor reads before incrementing tail */
        smp_mb();
        ioat_chan->tail = idx + i;
        /* no active descs have written a completion? */
        BUG_ON(active && !seen_current);
        ioat_chan->last_completion = phys_complete;

        if (active - i == 0) {
                dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
                        __func__);
                mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
        }

        /* microsecond delay by sysfs variable per pending descriptor */
        if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
                writew(min((ioat_chan->intr_coalesce * (active - i)),
                           IOAT_INTRDELAY_MASK),
                       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
                ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
        }
}

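/*
 * ioat_cleanup - reap completed descriptors under cleanup_lock and, if the
 * channel has halted on a handleable or recoverable error, invoke the error
 * handler.
 */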
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
        u64 phys_complete;

        spin_lock_bh(&ioat_chan->cleanup_lock);

        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        if (is_ioat_halted(*ioat_chan->completion)) {
                u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

                if (chanerr &
                    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
                        mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
                        ioat_eh(ioat_chan);
                }
        }

        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(struct tasklet_struct *t)
{
        struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

        ioat_cleanup(ioat_chan);
        if (!test_bit(IOAT_RUN, &ioat_chan->state))
                return;
        writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

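/*
 * ioat_restart_channel - reprogram the completion address, quiesce the
 * channel, reap anything that did complete, and restart execution from the
 * current tail.
 */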
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
        u64 phys_complete;

        /* set the completion address register again */
        writel(lower_32_bits(ioat_chan->completion_dma),
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(upper_32_bits(ioat_chan->completion_dma),
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        ioat_quiesce(ioat_chan, 0);
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        __ioat_restart_chan(ioat_chan);
}

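/*
 * ioat_abort_descs - complete every descriptor still outstanding after the
 * failed one at the tail with a DMA_TRANS_ABORTED result, advance the tail
 * past them, and record the new tail descriptor as the completion point.
 */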
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        u16 active;
        int idx = ioat_chan->tail, i;

        /*
         * We assume that the failed descriptor has been processed.
         * Now we are just returning all the remaining submitted
         * descriptors to abort.
         */
        active = ioat_ring_active(ioat_chan);

        /* we skip the failed descriptor that tail points to */
        for (i = 1; i < active; i++) {
                struct dma_async_tx_descriptor *tx;

                prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
                desc = ioat_get_ring_ent(ioat_chan, idx + i);

                tx = &desc->txd;
                if (tx->cookie) {
                        struct dmaengine_result res;

                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        res.result = DMA_TRANS_ABORTED;
                        dmaengine_desc_get_callback_invoke(tx, &res);
                        tx->callback = NULL;
                        tx->callback_result = NULL;
                }

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        WARN_ON(i + 1 >= active);
                        i++;
                }

                /* cleanup super extended descriptors */
                if (desc->sed) {
                        ioat_free_sed(ioat_dma, desc->sed);
                        desc->sed = NULL;
                }
        }

        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat_chan->tail = idx + active;

        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
        ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

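/*
 * ioat_eh - handle a halted channel: classify the CHANERR bits, fold
 * validation errors into the descriptor result, complete the faulting
 * descriptor, abort the remaining descriptors and reset the hardware on
 * unrecoverable data errors, then clear the error registers and restart
 * the channel. Unhandled error bits (or a spurious halt) are fatal.
 */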
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
        struct pci_dev *pdev = to_pdev(ioat_chan);
        struct ioat_dma_descriptor *hw;
        struct dma_async_tx_descriptor *tx;
        u64 phys_complete;
        struct ioat_ring_ent *desc;
        u32 err_handled = 0;
        u32 chanerr_int;
        u32 chanerr;
        bool abort = false;
        struct dmaengine_result res;

        /* cleanup so tail points to descriptor that caused the error */
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

        dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
                __func__, chanerr, chanerr_int);

        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
        hw = desc->hw;
        dump_desc_dbg(ioat_chan, desc);

        switch (hw->ctl_f.op) {
        case IOAT_OP_XOR_VAL:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                break;
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
                        *desc->result |= SUM_CHECK_Q_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_Q_ERR;
                }
                break;
        }

        if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
                if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
                        res.result = DMA_TRANS_READ_FAILED;
                        err_handled |= IOAT_CHANERR_READ_DATA_ERR;
                } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
                        res.result = DMA_TRANS_WRITE_FAILED;
                        err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
                }

                abort = true;
        } else
                res.result = DMA_TRANS_NOERROR;

        /* fault on unhandled error or spurious halt */
        if (chanerr ^ err_handled || chanerr == 0) {
                dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
                        __func__, chanerr, err_handled);
                dev_err(to_dev(ioat_chan), "Errors handled:\n");
                ioat_print_chanerrs(ioat_chan, err_handled);
                dev_err(to_dev(ioat_chan), "Errors not handled:\n");
                ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

                BUG();
        }

        /* cleanup the faulty descriptor since we are continuing */
        tx = &desc->txd;
        if (tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, &res);
                tx->callback = NULL;
                tx->callback_result = NULL;
        }

        /* mark faulting descriptor as complete */
        *ioat_chan->completion = desc->txd.phys;

        spin_lock_bh(&ioat_chan->prep_lock);
        /* we need to abort all descriptors */
        if (abort) {
                ioat_abort_descs(ioat_chan);
                /* clean up the channel, we could be in a weird state */
                ioat_reset_hw(ioat_chan);
        }

        writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

        ioat_restart_channel(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);
}

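/*
 * check_active - re-arm the completion watchdog while work is outstanding;
 * otherwise drop back to the (longer) idle timeout.
 */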
static void check_active(struct ioatdma_chan *ioat_chan)
{
        if (ioat_ring_active(ioat_chan)) {
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
                return;
        }

        if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
                mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

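/*
 * ioat_reboot_chan - take the channel down, abort all outstanding
 * descriptors, reset the hardware and restart the channel.
 */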
static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
{
        spin_lock_bh(&ioat_chan->prep_lock);
        set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
        spin_unlock_bh(&ioat_chan->prep_lock);

        ioat_abort_descs(ioat_chan);
        dev_warn(to_dev(ioat_chan), "Reset channel...\n");
        ioat_reset_hw(ioat_chan);
        dev_warn(to_dev(ioat_chan), "Restart channel...\n");
        ioat_restart_channel(ioat_chan);

        spin_lock_bh(&ioat_chan->prep_lock);
        clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
        spin_unlock_bh(&ioat_chan->prep_lock);
}

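/*
 * ioat_timer_event - per-channel watchdog. Reboots a halted channel,
 * reaps completions the interrupt path missed, force-restarts a channel
 * that has made no progress since the last acknowledged timeout, and
 * re-issues descriptors that were never pushed to hardware.
 */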
void ioat_timer_event(struct timer_list *t)
{
        struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
        dma_addr_t phys_complete;
        u64 status;

        status = ioat_chansts(ioat_chan);

        /* when halted due to errors check for channel
         * programming errors before advancing the completion state
         */
        if (is_ioat_halted(status)) {
                u32 chanerr;

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
                        __func__, chanerr);
                dev_err(to_dev(ioat_chan), "Errors:\n");
                ioat_print_chanerrs(ioat_chan, chanerr);

                if (test_bit(IOAT_RUN, &ioat_chan->state)) {
                        spin_lock_bh(&ioat_chan->cleanup_lock);
                        ioat_reboot_chan(ioat_chan);
                        spin_unlock_bh(&ioat_chan->cleanup_lock);
                }

                return;
        }

        spin_lock_bh(&ioat_chan->cleanup_lock);

        /* handle the no-actives case */
        if (!ioat_ring_active(ioat_chan)) {
                spin_lock_bh(&ioat_chan->prep_lock);
                check_active(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
                goto unlock_out;
        }

        /* handle the missed cleanup case */
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
                /* timer restarted in ioat_cleanup_preamble
                 * and IOAT_COMPLETION_ACK cleared
                 */
                __cleanup(ioat_chan, phys_complete);
                goto unlock_out;
        }

        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
        if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
                u32 chanerr;

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
                        status, chanerr);
                dev_err(to_dev(ioat_chan), "Errors:\n");
                ioat_print_chanerrs(ioat_chan, chanerr);

                dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
                        ioat_ring_active(ioat_chan));

                ioat_reboot_chan(ioat_chan);

                goto unlock_out;
        }

        /* handle missed issue pending case */
        if (ioat_ring_pending(ioat_chan)) {
                dev_warn(to_dev(ioat_chan),
                         "Completion timeout with pending descriptors\n");
                spin_lock_bh(&ioat_chan->prep_lock);
                __ioat_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
        }

        set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
unlock_out:
        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

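/*
 * ioat_tx_status - dmaengine ->device_tx_status hook. If the cookie has not
 * completed yet, run a cleanup pass and check again.
 */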
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        enum dma_status ret;

        ret = dma_cookie_status(c, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        ioat_cleanup(ioat_chan);

        return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
        /* throw away whatever the channel was doing and get it
         * initialized, with ioat3 specific workarounds
         */
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct pci_dev *pdev = ioat_dma->pdev;
        u32 chanerr;
        u16 dev_id;
        int err;

        ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

        if (ioat_dma->version < IOAT_VER_3_3) {
                /* clear any pending errors */
                err = pci_read_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
                if (err) {
                        dev_err(&pdev->dev,
                                "channel error register unreachable\n");
                        return err;
                }
                pci_write_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

                /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
                 * (workaround for spurious config parity error after restart)
                 */
                pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
                if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
                        pci_write_config_dword(pdev,
                                        IOAT_PCI_DMAUNCERRSTS_OFFSET,
                                        0x10);
                }
        }

        if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
                ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
                ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
                ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
        }

        err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
        if (!err) {
                if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
                        writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
                        writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
                        writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
                }
        }

        if (err)
                dev_err(&pdev->dev, "Failed to reset: %d\n", err);

        return err;
}