// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dma.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
	struct list_head list;
	u8 cmd;
	unsigned int laddr;	/* Logical address */
	struct {
		u32 off;
		u32 cnt;
	} sub_page;
	dma_addr_t dma_addr;
	struct page *page;
	unsigned int pg_off;	/* Page Offset */
	rsxx_dma_cb cb;
	void *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)

struct hw_status {
	u8	status;
	u8	tag;
	__le16	count;
	__le32	_rsvd2;
	__le64	_rsvd3;
} __packed;

enum rsxx_dma_status {
	DMA_SW_ERR	= 0x1,
	DMA_HW_FAULT	= 0x2,
	DMA_CANCELLED	= 0x4,
};

struct hw_cmd {
	u8	command;
	u8	tag;
	u8	_rsvd;
	u8	sub_page; /* Bit[0:2]: 512byte offset */
			  /* Bit[4:6]: 512byte count */
	__le32	device_addr;
	__le64	host_addr;
} __packed;

enum rsxx_hw_cmd {
	HW_CMD_BLK_DISCARD	= 0x70,
	HW_CMD_BLK_WRITE	= 0x80,
	HW_CMD_BLK_READ		= 0xC0,
	HW_CMD_BLK_RECON_READ	= 0xE0,
};

enum rsxx_hw_status {
	HW_STATUS_CRC		= 0x01,
	HW_STATUS_HARD_ERR	= 0x02,
	HW_STATUS_SOFT_ERR	= 0x04,
	HW_STATUS_FAULT		= 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
	int next_tag;
	struct rsxx_dma *dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
	spinlock_t		lock;
	int			head;
	struct dma_tracker	list[];
};


/*----------------- Misc Utility Functions -------------------*/
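/*
 * Fold the stripe/target-select bits out of a byte address on the card and
 * convert the result into a logical HW-block address (laddr).
 */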
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
	unsigned long long tgt_addr8;

	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
		     card->_stripe.upper_mask) |
		    ((addr8) & card->_stripe.lower_mask);
	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
	return tgt_addr8;
}

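/* Pick the DMA channel (target) that owns this byte address in the stripe layout. */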
static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
	unsigned int tgt;

	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

	return tgt;
}

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
	/* Reset all DMA Command/Status Queues */
	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

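/*
 * Transfer size in bytes: the sub-page count of 512-byte sectors if one was
 * set, otherwise a full HW block.
 */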
static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;
	else
		return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
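/*
 * A tracker ties an in-flight DMA to the tag it was issued to the hardware
 * with. Free tags form a singly linked list threaded through next_tag;
 * head is -1 once every tag is in use.
 */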
static void set_tracker_dma(struct dma_tracker_list *trackers,
			    int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
	int tag;

	spin_lock(&trackers->lock);
	tag = trackers->head;
	if (tag != -1) {
		trackers->head = trackers->list[tag].next_tag;
		trackers->list[tag].next_tag = -1;
	}
	spin_unlock(&trackers->lock);

	return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
	spin_lock(&trackers->lock);
	trackers->list[tag].next_tag = trackers->head;
	trackers->head = tag;
	trackers->list[tag].dma = NULL;
	spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK		(0x0000ffff)

#define INTR_COAL_COUNT_SHIFT		16
#define INTR_COAL_COUNT_BITS		9
#define INTR_COAL_COUNT_MASK		(((1 << INTR_COAL_COUNT_BITS) - 1) << \
					 INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS	64


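/* Pack a coalescing count and latency (in ns) into the register format above. */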
static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

	if (mode == RSXX_INTR_COAL_DISABLED)
		return 0;

	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
			(latency_units & INTR_COAL_LATENCY_MASK);

}

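/*
 * In auto-tune mode, re-target the coalescing count at half of the current
 * hardware queue depth so the interrupt rate tracks the outstanding work.
 */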
static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
	int i;
	u32 q_depth = 0;
	u32 intr_coal;

	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
	    unlikely(card->eeh_state))
		return;

	for (i = 0; i < card->n_targets; i++)
		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      q_depth / 2,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
	if (dma->cmd != HW_CMD_BLK_DISCARD) {
		if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
			dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
				       get_dma_size(dma),
				       dma->cmd == HW_CMD_BLK_WRITE ?
				       DMA_TO_DEVICE :
				       DMA_FROM_DEVICE);
		}
	}

	kmem_cache_free(rsxx_dma_pool, dma);
}

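/* Account the completion status, run the caller's callback, then free the DMA. */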
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (status & DMA_SW_ERR)
		ctrl->stats.dma_sw_err++;
	if (status & DMA_HW_FAULT)
		ctrl->stats.dma_hw_fault++;
	if (status & DMA_CANCELLED)
		ctrl->stats.dma_cancelled++;

	if (dma->cb)
		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

	rsxx_free_dma(ctrl, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
			   struct list_head *q, unsigned int done)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);
		if (done & COMPLETE_DMA)
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		else
			rsxx_free_dma(ctrl, dma);
		cnt++;
	}

	return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	/*
	 * Requeued DMAs go to the front of the queue so they are issued
	 * first.
	 */
	spin_lock_bh(&ctrl->queue_lock);
	ctrl->stats.sw_q_depth++;
	list_add(&dma->list, &ctrl->queue);
	spin_unlock_bh(&ctrl->queue_lock);
}

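/*
 * Translate a hardware error status into a DMA status. Failed reads may be
 * retried as reconstruct reads when hard-error scrubbing is enabled; all
 * other errors complete the DMA with a fault.
 */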
static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma,
				  u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	dev_dbg(CARD_TO_DEV(ctrl->card),
		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
		dma->cmd, dma->laddr, hw_st);

	if (hw_st & HW_STATUS_CRC)
		ctrl->stats.crc_errors++;
	if (hw_st & HW_STATUS_HARD_ERR)
		ctrl->stats.hard_errors++;
	if (hw_st & HW_STATUS_SOFT_ERR)
		ctrl->stats.soft_errors++;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			if (ctrl->card->scrub_hard) {
				dma->cmd = HW_CMD_BLK_RECON_READ;
				requeue_cmd = 1;
				ctrl->stats.reads_retried++;
			} else {
				status |= DMA_HW_FAULT;
				ctrl->stats.reads_failed++;
			}
		} else if (hw_st & HW_STATUS_FAULT) {
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_RECON_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			/* Data could not be reconstructed. */
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_WRITE:
		status |= DMA_HW_FAULT;
		ctrl->stats.writes_failed++;

		break;
	case HW_CMD_BLK_DISCARD:
		status |= DMA_HW_FAULT;
		ctrl->stats.discards_failed++;

		break;
	default:
		dev_err(CARD_TO_DEV(ctrl->card),
			"Unknown command in DMA!(cmd: x%02x "
			"laddr x%08x st: x%02x\n",
			dma->cmd, dma->laddr, hw_st);
		status |= DMA_SW_ERR;

		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl, dma, status);
}

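/*
 * Activity-timer callback: if the SW_CMD_IDX write appears to have been
 * dropped it is re-issued, otherwise the channel is declared faulted and
 * its queues are drained.
 */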
static void dma_engine_stalled(struct timer_list *t)
{
	struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
	int cnt;

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
	    unlikely(ctrl->card->eeh_state))
		return;

	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
		/*
		 * The dma engine was stalled because the SW_CMD_IDX write
		 * was lost. Issue it again to recover.
		 */
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "SW_CMD_IDX write was lost, re-writing...\n");
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
	} else {
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "DMA channel %d has stalled, faulting interface.\n",
			 ctrl->id);
		ctrl->card->dma_fault = 1;

		/* Clean up the DMA queue */
		spin_lock(&ctrl->queue_lock);
		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock(&ctrl->queue_lock);

		cnt += rsxx_dma_cancel(ctrl);

		if (cnt)
			dev_info(CARD_TO_DEV(ctrl->card),
				 "Freed %d queued DMAs on channel %d\n",
				 cnt, ctrl->id);
	}
}

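/*
 * Drain the software queue: for each DMA grab a free tag, map the page,
 * fill the next slot of the command ring, and finally tell the hardware how
 * far the ring has advanced by writing SW_CMD_IDX.
 */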
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;
	int dir;

	hw_cmd_buf = ctrl->cmd.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	while (1) {
		spin_lock_bh(&ctrl->queue_lock);
		if (list_empty(&ctrl->queue)) {
			spin_unlock_bh(&ctrl->queue_lock);
			break;
		}
		spin_unlock_bh(&ctrl->queue_lock);

		tag = pop_tracker(ctrl->trackers);
		if (tag == -1)
			break;

		spin_lock_bh(&ctrl->queue_lock);
		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
		list_del(&dma->list);
		ctrl->stats.sw_q_depth--;
		spin_unlock_bh(&ctrl->queue_lock);

		/*
		 * This will catch any DMAs that slipped in right before the
		 * fault, but were queued after all the other DMAs were
		 * cancelled.
		 */
		if (unlikely(ctrl->card->dma_fault)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			continue;
		}

		if (dma->cmd != HW_CMD_BLK_DISCARD) {
			if (dma->cmd == HW_CMD_BLK_WRITE)
				dir = DMA_TO_DEVICE;
			else
				dir = DMA_FROM_DEVICE;

			/*
			 * The function dma_map_page is placed here because we
			 * can only, by design, issue up to 255 commands to the
			 * hardware at one time per DMA channel. So the maximum
			 * amount of mapped memory would be 255 * 4 channels *
			 * 4096 Bytes which is less than 2GB, the limit of a x8
			 * Non-HWWD PCIe slot. This way the dma_map_page
			 * function should never fail because of a lack of
			 * mappable memory.
			 */
			dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
					dma->pg_off, dma->sub_page.cnt << 9, dir);
			if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
				push_tracker(ctrl->trackers, tag);
				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
				continue;
			}
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag = tag;
		hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
		hw_cmd_buf[ctrl->cmd.idx].sub_page =
					((dma->sub_page.cnt & 0x7) << 4) |
					 (dma->sub_page.off & 0x7);

		hw_cmd_buf[ctrl->cmd.idx].device_addr =
					cpu_to_le32(dma->laddr);

		hw_cmd_buf[ctrl->cmd.idx].host_addr =
					cpu_to_le64(dma->dma_addr);

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
		cmds_pending++;

		if (dma->cmd == HW_CMD_BLK_WRITE)
			ctrl->stats.writes_issued++;
		else if (dma->cmd == HW_CMD_BLK_DISCARD)
			ctrl->stats.discards_issued++;
		else
			ctrl->stats.reads_issued++;
	}

	/* Let HW know we've queued commands. */
	if (cmds_pending) {
		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (unlikely(ctrl->card->eeh_state)) {
			del_timer_sync(&ctrl->activity_timer);
			return;
		}

		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}
}

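/*
 * Walk the status ring. An entry is valid once its count field catches up
 * with the expected completion count (e_cnt); each entry is matched to its
 * tracker by tag and either completed or handed to error handling.
 */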
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	unsigned long flags;
	u16 count;
	u8 status;
	u8 tag;
	struct hw_status *hw_st_buf;

	hw_st_buf = ctrl->status.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->dma_fault) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

	while (count == ctrl->e_cnt) {
		/*
		 * The read memory-barrier is necessary to keep aggressive
		 * processors/optimizers (such as the PPC Apple G5) from
		 * reordering the following status-buffer tag & status read
		 * *before* the count read on subsequent iterations of the
		 * loop!
		 */
		rmb();

		status = hw_st_buf[ctrl->status.idx].status;
		tag = hw_st_buf[ctrl->status.idx].tag;

		dma = get_tracker_dma(ctrl->trackers, tag);
		if (dma == NULL) {
			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

			dev_err(CARD_TO_DEV(ctrl->card),
				"No tracker for tag %d "
				"(idx %d id %d)\n",
				tag, ctrl->status.idx, ctrl->id);
			return;
		}

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Completing DMA%d"
			"(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
			ctrl->id, dma->laddr, tag, status, count,
			ctrl->status.idx);

		atomic_dec(&ctrl->stats.hw_q_depth);

		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (status)
			rsxx_handle_dma_error(ctrl, dma, status);
		else
			rsxx_complete_dma(ctrl, dma, 0);

		push_tracker(ctrl->trackers, tag);

		ctrl->status.idx = (ctrl->status.idx + 1) &
				   RSXX_CS_IDX_MASK;
		ctrl->e_cnt++;

		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
	}

	dma_intr_coal_auto_tune(ctrl->card);

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		del_timer_sync(&ctrl->activity_timer);

	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

	spin_lock_bh(&ctrl->queue_lock);
	if (ctrl->stats.sw_q_depth)
		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
	spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_issue_dmas(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_dma_done(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

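/* Build a discard DMA for one HW block and add it to the per-target list q. */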
static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
				       struct list_head *q,
				       unsigned int laddr,
				       rsxx_dma_cb cb,
				       void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd = HW_CMD_BLK_DISCARD;
	dma->laddr = laddr;
	dma->dma_addr = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page = NULL;
	dma->pg_off = 0;
	dma->cb = cb;
	dma->cb_data = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}

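/* Build a read/write DMA covering dma_len bytes within one HW block and add it to q. */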
static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
				   struct list_head *q,
				   int dir,
				   unsigned int dma_off,
				   unsigned int dma_len,
				   unsigned int laddr,
				   struct page *page,
				   unsigned int pg_off,
				   rsxx_dma_cb cb,
				   void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr = laddr;
	dma->sub_page.off = (dma_off >> 9);
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page = page;
	dma->pg_off = pg_off;
	dma->cb = cb;
	dma->cb_data = cb_data;

	dev_dbg(CARD_TO_DEV(card),
		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
		dma->sub_page.cnt, dma->page, dma->pg_off);

	/* Queue the DMA */
	list_add_tail(&dma->list, q);

	return 0;
}

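/*
 * Split a bio into per-target DMA lists: discards advance one HW block at a
 * time, reads/writes are cut at HW block boundaries. The lists are then
 * spliced onto each channel's software queue and the issue work is kicked.
 */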
blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
				struct bio *bio,
				atomic_t *n_dmas,
				rsxx_dma_cb cb,
				void *cb_data)
{
	struct list_head dma_list[RSXX_MAX_TARGETS];
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long long addr8;
	unsigned int laddr;
	unsigned int bv_len;
	unsigned int bv_off;
	unsigned int dma_off;
	unsigned int dma_len;
	int dma_cnt[RSXX_MAX_TARGETS];
	int tgt;
	blk_status_t st;
	int i;

	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
	atomic_set(n_dmas, 0);

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&dma_list[i]);
		dma_cnt[i] = 0;
	}

	if (bio_op(bio) == REQ_OP_DISCARD) {
		bv_len = bio->bi_iter.bi_size;

		while (bv_len > 0) {
			tgt = rsxx_get_dma_tgt(card, addr8);
			laddr = rsxx_addr8_to_laddr(addr8, card);

			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
						cb, cb_data);
			if (st)
				goto bvec_err;

			dma_cnt[tgt]++;
			atomic_inc(n_dmas);
			addr8 += RSXX_HW_BLK_SIZE;
			bv_len -= RSXX_HW_BLK_SIZE;
		}
	} else {
		bio_for_each_segment(bvec, bio, iter) {
			bv_len = bvec.bv_len;
			bv_off = bvec.bv_offset;

			while (bv_len > 0) {
				tgt = rsxx_get_dma_tgt(card, addr8);
				laddr = rsxx_addr8_to_laddr(addr8, card);
				dma_off = addr8 & RSXX_HW_BLK_MASK;
				dma_len = min(bv_len,
					      RSXX_HW_BLK_SIZE - dma_off);

				st = rsxx_queue_dma(card, &dma_list[tgt],
						    bio_data_dir(bio),
						    dma_off, dma_len,
						    laddr, bvec.bv_page,
						    bv_off, cb, cb_data);
				if (st)
					goto bvec_err;

				dma_cnt[tgt]++;
				atomic_inc(n_dmas);
				addr8 += dma_len;
				bv_off += dma_len;
				bv_len -= dma_len;
			}
		}
	}

	for (i = 0; i < card->n_targets; i++) {
		if (!list_empty(&dma_list[i])) {
			spin_lock_bh(&card->ctrl[i].queue_lock);
			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
			spin_unlock_bh(&card->ctrl[i].queue_lock);

			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
		}
	}

	return 0;

bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
				       FREE_DMA);
	return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
	ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
				&ctrl->status.dma_addr, GFP_KERNEL);
	ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
				&ctrl->cmd.dma_addr, GFP_KERNEL);
	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
		return -ENOMEM;

	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_HI);

	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
			 ctrl->status.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
			 ctrl->cmd.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

	return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
			      struct rsxx_dma_ctrl *ctrl)
{
	int i;
	int st;

	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
	if (!ctrl->trackers)
		return -ENOMEM;

	ctrl->trackers->head = 0;
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		ctrl->trackers->list[i].next_tag = i + 1;
		ctrl->trackers->list[i].dma = NULL;
	}
	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
	spin_lock_init(&ctrl->trackers->lock);

	spin_lock_init(&ctrl->queue_lock);
	mutex_init(&ctrl->work_lock);
	INIT_LIST_HEAD(&ctrl->queue);

	timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);

	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
	if (!ctrl->issue_wq)
		return -ENOMEM;

	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
	if (!ctrl->done_wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

	st = rsxx_hw_buffers_init(dev, ctrl);
	if (st)
		return st;

	return 0;
}

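/*
 * Derive the masks/shifts used to split a byte address into its stripe
 * offset and target number. stripe_size8 must be a power of two.
 */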
static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
				 unsigned int stripe_size8)
{
	if (!is_power_of_2(stripe_size8)) {
		dev_err(CARD_TO_DEV(card),
			"stripe_size is NOT a power of 2!\n");
		return -EINVAL;
	}

	card->_stripe.lower_mask = stripe_size8 - 1;

	card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
	card->_stripe.upper_shift = ffs(card->n_targets) - 1;

	card->_stripe.target_mask = card->n_targets - 1;
	card->_stripe.target_shift = ffs(stripe_size8) - 1;

	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
		card->_stripe.lower_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
		card->_stripe.upper_shift);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
		card->_stripe.upper_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
		card->_stripe.target_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
		card->_stripe.target_shift);

	return 0;
}

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
	u32 intr_coal;

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      card->config.data.intr_coal.count,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);

	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
	unsigned long flags;
	int st;
	int i;

	dev_info(CARD_TO_DEV(card),
		 "Initializing %d DMA targets\n",
		 card->n_targets);

	/* Regmap is divided up into 4K chunks. One for each DMA channel */
	for (i = 0; i < card->n_targets; i++)
		card->ctrl[i].regmap = card->regmap + (i * 4096);

	card->dma_fault = 0;

	/* Reset the DMA queues */
	rsxx_dma_queue_reset(card);

	/************* Setup DMA Control *************/
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
		if (st)
			goto failed_dma_setup;

		card->ctrl[i].card = card;
		card->ctrl[i].id = i;
	}

	card->scrub_hard = 1;

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Enable the interrupts after all setup has completed. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	return 0;

failed_dma_setup:
	for (i = 0; i < card->n_targets; i++) {
		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (ctrl->trackers)
			vfree(ctrl->trackers);

		if (ctrl->status.buf)
			dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
					  ctrl->status.buf,
					  ctrl->status.dma_addr);
		if (ctrl->cmd.buf)
			dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
					  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}

	return st;
}

int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int i;
	int cnt = 0;

	/* Clean up issued DMAs */
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		dma = get_tracker_dma(ctrl->trackers, i);
		if (dma) {
			atomic_dec(&ctrl->stats.hw_q_depth);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			push_tracker(ctrl->trackers, i);
			cnt++;
		}
	}

	return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
	struct rsxx_dma_ctrl *ctrl;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (timer_pending(&ctrl->activity_timer))
			del_timer_sync(&ctrl->activity_timer);

		/* Clean up the DMA queue */
		spin_lock_bh(&ctrl->queue_lock);
		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock_bh(&ctrl->queue_lock);

		rsxx_dma_cancel(ctrl);

		vfree(ctrl->trackers);

		dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
				  ctrl->status.buf, ctrl->status.dma_addr);
		dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
				  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}
}

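/*
 * EEH support: pull every DMA that was already issued to the hardware back
 * off its tracker, unmap it, and put it back on the software queue so it
 * can be re-issued once the slot recovers.
 */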
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
	int i;
	int j;
	int cnt;
	struct rsxx_dma *dma;
	struct list_head *issued_dmas;

	issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
			      GFP_KERNEL);
	if (!issued_dmas)
		return -ENOMEM;

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&issued_dmas[i]);
		cnt = 0;
		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
			dma = get_tracker_dma(card->ctrl[i].trackers, j);
			if (dma == NULL)
				continue;

			if (dma->cmd == HW_CMD_BLK_WRITE)
				card->ctrl[i].stats.writes_issued--;
			else if (dma->cmd == HW_CMD_BLK_DISCARD)
				card->ctrl[i].stats.discards_issued--;
			else
				card->ctrl[i].stats.reads_issued--;

			if (dma->cmd != HW_CMD_BLK_DISCARD) {
				dma_unmap_page(&card->dev->dev, dma->dma_addr,
					       get_dma_size(dma),
					       dma->cmd == HW_CMD_BLK_WRITE ?
					       DMA_TO_DEVICE :
					       DMA_FROM_DEVICE);
			}

			list_add_tail(&dma->list, &issued_dmas[i]);
			push_tracker(card->ctrl[i].trackers, j);
			cnt++;
		}

		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_splice(&issued_dmas[i], &card->ctrl[i].queue);

		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
		card->ctrl[i].stats.sw_q_depth += cnt;
		card->ctrl[i].e_cnt = 0;
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	kfree(issued_dmas);

	return 0;
}

int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
	if (!rsxx_dma_pool)
		return -ENOMEM;

	return 0;
}


void rsxx_dma_cleanup(void)
{
	kmem_cache_destroy(rsxx_dma_pool);
}