// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: core.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/genhd.h>
#include <linux/idr.h>

#include "rsxx_priv.h"
#include "rsxx_cfg.h"

#define NO_LEGACY 0
#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */

MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

static unsigned int sync_start = 1;
module_param(sync_start, uint, 0444);
MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
			     "until the card startup has completed.");

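/* Hands out a unique disk_id per card (used when naming its block device). */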
static DEFINE_IDA(rsxx_disk_ida);

/* --------------------Debugfs Setup ------------------- */

static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;

	seq_printf(m, "HWID 0x%08x\n",
		   ioread32(card->regmap + HWID));
	seq_printf(m, "SCRATCH 0x%08x\n",
		   ioread32(card->regmap + SCRATCH));
	seq_printf(m, "IER 0x%08x\n",
		   ioread32(card->regmap + IER));
	seq_printf(m, "IPR 0x%08x\n",
		   ioread32(card->regmap + IPR));
	seq_printf(m, "CREG_CMD 0x%08x\n",
		   ioread32(card->regmap + CREG_CMD));
	seq_printf(m, "CREG_ADD 0x%08x\n",
		   ioread32(card->regmap + CREG_ADD));
	seq_printf(m, "CREG_CNT 0x%08x\n",
		   ioread32(card->regmap + CREG_CNT));
	seq_printf(m, "CREG_STAT 0x%08x\n",
		   ioread32(card->regmap + CREG_STAT));
	seq_printf(m, "CREG_DATA0 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA0));
	seq_printf(m, "CREG_DATA1 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA1));
	seq_printf(m, "CREG_DATA2 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA2));
	seq_printf(m, "CREG_DATA3 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA3));
	seq_printf(m, "CREG_DATA4 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA4));
	seq_printf(m, "CREG_DATA5 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA5));
	seq_printf(m, "CREG_DATA6 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA6));
	seq_printf(m, "CREG_DATA7 0x%08x\n",
		   ioread32(card->regmap + CREG_DATA7));
	seq_printf(m, "INTR_COAL 0x%08x\n",
		   ioread32(card->regmap + INTR_COAL));
	seq_printf(m, "HW_ERROR 0x%08x\n",
		   ioread32(card->regmap + HW_ERROR));
	seq_printf(m, "DEBUG0 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG0));
	seq_printf(m, "DEBUG1 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG1));
	seq_printf(m, "DEBUG2 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG2));
	seq_printf(m, "DEBUG3 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG3));
	seq_printf(m, "DEBUG4 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG4));
	seq_printf(m, "DEBUG5 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG5));
	seq_printf(m, "DEBUG6 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG6));
	seq_printf(m, "DEBUG7 0x%08x\n",
		   ioread32(card->regmap + PCI_DEBUG7));
	seq_printf(m, "RECONFIG 0x%08x\n",
		   ioread32(card->regmap + PCI_RECONFIG));

	return 0;
}

static int rsxx_attr_stats_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		seq_printf(m, "Ctrl %d CRC Errors = %d\n",
			   i, card->ctrl[i].stats.crc_errors);
		seq_printf(m, "Ctrl %d Hard Errors = %d\n",
			   i, card->ctrl[i].stats.hard_errors);
		seq_printf(m, "Ctrl %d Soft Errors = %d\n",
			   i, card->ctrl[i].stats.soft_errors);
		seq_printf(m, "Ctrl %d Writes Issued = %d\n",
			   i, card->ctrl[i].stats.writes_issued);
		seq_printf(m, "Ctrl %d Writes Failed = %d\n",
			   i, card->ctrl[i].stats.writes_failed);
		seq_printf(m, "Ctrl %d Reads Issued = %d\n",
			   i, card->ctrl[i].stats.reads_issued);
		seq_printf(m, "Ctrl %d Reads Failed = %d\n",
			   i, card->ctrl[i].stats.reads_failed);
		seq_printf(m, "Ctrl %d Reads Retried = %d\n",
			   i, card->ctrl[i].stats.reads_retried);
		seq_printf(m, "Ctrl %d Discards Issued = %d\n",
			   i, card->ctrl[i].stats.discards_issued);
		seq_printf(m, "Ctrl %d Discards Failed = %d\n",
			   i, card->ctrl[i].stats.discards_failed);
		seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
			   i, card->ctrl[i].stats.dma_sw_err);
		seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
			   i, card->ctrl[i].stats.dma_hw_fault);
		seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
			   i, card->ctrl[i].stats.dma_cancelled);
		seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
			   i, card->ctrl[i].stats.sw_q_depth);
		seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
			   i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
	}

	return 0;
}

static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_stats_show, inode->i_private);
}

static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
}

static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	int st;

	buf = kzalloc(cnt, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

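	/*
	 * CRAM is reached through the creg command interface; the file
	 * offset selects the CRAM address to access.
	 */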
	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	if (!st) {
		if (copy_to_user(ubuf, buf, cnt))
			st = -EFAULT;
	}
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = memdup_user(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

static const struct file_operations debugfs_cram_fops = {
	.owner		= THIS_MODULE,
	.read		= rsxx_cram_read,
	.write		= rsxx_cram_write,
};

static const struct file_operations debugfs_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations debugfs_pci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_pci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
{
	struct dentry *debugfs_stats;
	struct dentry *debugfs_pci_regs;
	struct dentry *debugfs_cram;

	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
	if (IS_ERR_OR_NULL(card->debugfs_dir))
		goto failed_debugfs_dir;

	debugfs_stats = debugfs_create_file("stats", 0444,
					    card->debugfs_dir, card,
					    &debugfs_stats_fops);
	if (IS_ERR_OR_NULL(debugfs_stats))
		goto failed_debugfs_stats;

	debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
					       card->debugfs_dir, card,
					       &debugfs_pci_regs_fops);
	if (IS_ERR_OR_NULL(debugfs_pci_regs))
		goto failed_debugfs_pci_regs;

	debugfs_cram = debugfs_create_file("cram", 0644,
					   card->debugfs_dir, card,
					   &debugfs_cram_fops);
	if (IS_ERR_OR_NULL(debugfs_cram))
		goto failed_debugfs_cram;

	return;
failed_debugfs_cram:
	debugfs_remove(debugfs_pci_regs);
failed_debugfs_pci_regs:
	debugfs_remove(debugfs_stats);
failed_debugfs_stats:
	debugfs_remove(card->debugfs_dir);
failed_debugfs_dir:
	card->debugfs_dir = NULL;
}

/*----------------- Interrupt Control & Handling -------------------*/

static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
	card->isr_mask = 0;
	card->ier_mask = 0;
}

static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}

static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}

/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
			     unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->isr_mask, intr);
	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
			      unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->isr_mask, intr);
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

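	/*
	 * Dispatch every pending interrupt source. When a DMA interrupt
	 * gets masked in the IER below, the ISR is re-read in case another
	 * source asserted while this one was being handled.
	 */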
	do {
		reread_isr = 0;

		if (unlikely(card->eeh_state))
			break;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			queue_work(card->creg_ctrl.creg_wq,
				   &card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			queue_work(card->event_wq, &card->event_work);
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/*----------------- Card Event Handler -------------------*/
static const char * const rsxx_card_state_to_str(unsigned int state)
{
	static const char * const state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};

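	/*
	 * Card states are one-hot bit flags, so ffs() turns the lowest set
	 * bit into a 1-based index, skipping the "Unknown" entry at [0].
	 */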
	return state_strings[ffs(state)];
}

static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected (%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
		fallthrough;
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		fallthrough;

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}

static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

/*----------------- Card Operations -------------------*/
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

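	/*
	 * Poll until the card reports SHUTDOWN or the 120 second timeout
	 * expires. Each rsxx_get_card_state() call blocks on a creg
	 * transaction, which is what paces this loop.
	 */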
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}

static int rsxx_eeh_frozen(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int st;

	dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");

	card->eeh_state = 1;
	rsxx_mask_interrupts(card);

	/*
	 * We need to guarantee that the write for eeh_state and masking
	 * interrupts does not become reordered. This will prevent a possible
	 * race condition with the EEH code.
	 */
	wmb();

	pci_disable_device(dev);

	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	rsxx_eeh_save_issued_creg(card);

	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			dma_free_coherent(&card->dev->dev,
					  STATUS_BUFFER_SIZE8,
					  card->ctrl[i].status.buf,
					  card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			dma_free_coherent(&card->dev->dev,
					  COMMAND_BUFFER_SIZE8,
					  card->ctrl[i].cmd.buf,
					  card->ctrl[i].cmd.dma_addr);
	}

	return 0;
}

static void rsxx_eeh_failure(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int cnt = 0;

	dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");

	card->eeh_state = 1;
	card->halt = 1;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
					     &card->ctrl[i].queue,
					     COMPLETE_DMA);
		spin_unlock_bh(&card->ctrl[i].queue_lock);

		cnt += rsxx_dma_cancel(&card->ctrl[i]);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, card->ctrl[i].id);
	}
}

static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
	unsigned int status;
	int iter = 0;

	/* We need to wait for the hardware to reset */
	while (iter++ < 10) {
		status = ioread32(card->regmap + PCI_RECONFIG);

		if (status & RSXX_FLUSH_BUSY) {
			ssleep(1);
			continue;
		}

		if (status & RSXX_FLUSH_TIMEOUT)
			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
		return 0;
	}

	/* Hardware failed resetting itself. */
	return -1;
}

static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    pci_channel_state_t error)
{
	int st;

	if (dev->revision < RSXX_EEH_SUPPORT)
		return PCI_ERS_RESULT_NONE;

	if (error == pci_channel_io_perm_failure) {
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	st = rsxx_eeh_frozen(dev);
	if (st) {
		dev_err(&dev->dev, "Slot reset setup failed\n");
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;
	int st;

	dev_warn(&dev->dev,
		"IBM Flash Adapter PCI: recovering from slot reset.\n");

	st = pci_enable_device(dev);
	if (st)
		goto failed_hw_setup;

	pci_set_master(dev);

	st = rsxx_eeh_fifo_flush_poll(card);
	if (st)
		goto failed_hw_setup;

	rsxx_dma_queue_reset(card);

	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			goto failed_hw_buffers_init;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Clears the ISR register from spurious interrupts */
	st = ioread32(card->regmap + ISR);

	card->eeh_state = 0;

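	/* Re-arm the interrupt set that matches this card's target count. */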
	spin_lock_irqsave(&card->irq_lock, flags);
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
	else
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	rsxx_kick_creg_queue(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock(&card->ctrl[i].queue_lock);
		if (list_empty(&card->ctrl[i].queue)) {
			spin_unlock(&card->ctrl[i].queue_lock);
			continue;
		}
		spin_unlock(&card->ctrl[i].queue_lock);

		queue_work(card->ctrl[i].issue_wq,
			   &card->ctrl[i].issue_dma_work);
	}

	dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");

	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			dma_free_coherent(&card->dev->dev,
					  STATUS_BUFFER_SIZE8,
					  card->ctrl[i].status.buf,
					  card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			dma_free_coherent(&card->dev->dev,
					  COMMAND_BUFFER_SIZE8,
					  card->ctrl[i].cmd.buf,
					  card->ctrl[i].cmd.dma_addr);
	}
failed_hw_setup:
	rsxx_eeh_failure(dev);
	return PCI_ERS_RESULT_DISCONNECT;
}

/*----------------- Driver Initialization & Setup -------------------*/
/* Returns: 0 if the driver is compatible with the device
	   -1 if the driver is NOT compatible with the device */
static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	if (pci_rev > RS70_PCI_REV_SUPPORTED)
		return -1;
	return 0;
}

static int rsxx_pci_probe(struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	struct rsxx_cardinfo *card;
	int st;
	unsigned int sync_timeout;

	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;
	pci_set_drvdata(dev, card);

	st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
	if (st < 0)
		goto failed_ida_get;
	card->disk_id = st;

	st = pci_enable_device(dev);
	if (st)
		goto failed_enable;

	pci_set_master(dev);

	st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"No usable DMA configuration, aborting\n");
		goto failed_dma_mask;
	}

	st = pci_request_regions(dev, DRIVER_NAME);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed to request memory region\n");
		goto failed_request_regions;
	}

	if (pci_resource_len(dev, 0) == 0) {
		dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	card->regmap = pci_iomap(dev, 0, 0);
	if (!card->regmap) {
		dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	spin_lock_init(&card->irq_lock);
	card->halt = 0;
	card->eeh_state = 0;

	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);

	if (!force_legacy) {
		st = pci_enable_msi(dev);
		if (st)
			dev_warn(CARD_TO_DEV(card),
				"Failed to enable MSI\n");
	}
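
	/*
	 * A failed MSI enable is not fatal; request_irq() below works with
	 * dev->irq whether it carries an MSI or a legacy INTx vector.
	 */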
	st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
			 DRIVER_NAME, card);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed requesting IRQ%d\n", dev->irq);
		goto failed_irq;
	}

	/************* Setup Processor Command Interface *************/
	st = rsxx_creg_setup(card);
	if (st) {
		dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
		goto failed_creg_setup;
	}

	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
	spin_unlock_irq(&card->irq_lock);

	st = rsxx_compatibility_check(card);
	if (st) {
		dev_warn(CARD_TO_DEV(card),
			"Incompatible driver detected. Please update the driver.\n");
		st = -EINVAL;
		goto failed_compatibility_check;
	}

	/************* Load Card Config *************/
	st = rsxx_load_config(card);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed loading card config\n");

	/************* Setup DMA Engine *************/
	st = rsxx_get_num_targets(card, &card->n_targets);
	if (st)
		dev_info(CARD_TO_DEV(card),
			"Failed reading the number of DMA targets\n");

	card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl),
			     GFP_KERNEL);
	if (!card->ctrl) {
		st = -ENOMEM;
		goto failed_dma_setup;
	}

	st = rsxx_dma_setup(card);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed to setup DMA engine\n");
		goto failed_dma_setup;
	}

	/************* Setup Card Event Handler *************/
	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
	if (!card->event_wq) {
		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
		st = -ENOMEM;
		goto failed_event_handler;
	}

	INIT_WORK(&card->event_work, card_event_handler);

	st = rsxx_setup_dev(card);
	if (st)
		goto failed_create_dev;

	rsxx_get_card_state(card, &card->state);

	dev_info(CARD_TO_DEV(card),
		"card state: %s\n",
		rsxx_card_state_to_str(card->state));

	/*
	 * Now that the DMA engine and devices have been set up, we can
	 * enable the event interrupt. (It kicks off actions in those
	 * layers, so we couldn't enable it right away.)
	 */
	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irq(&card->irq_lock);

	if (card->state == CARD_STATE_SHUTDOWN) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
		if (st)
			dev_crit(CARD_TO_DEV(card),
				"Failed issuing card startup\n");
		if (sync_start) {
			sync_timeout = SYNC_START_TIMEOUT;

			dev_info(CARD_TO_DEV(card),
				"Waiting for card to startup\n");

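			/*
			 * Poll once per second until the card leaves the
			 * STARTING state or the 10 minute sync_start
			 * timeout expires.
			 */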
			do {
				ssleep(1);
				sync_timeout--;

				rsxx_get_card_state(card, &card->state);
			} while (sync_timeout &&
				 (card->state == CARD_STATE_STARTING));

			if (card->state == CARD_STATE_STARTING) {
				dev_warn(CARD_TO_DEV(card),
					 "Card startup timed out\n");
				card->size8 = 0;
			} else {
				dev_info(CARD_TO_DEV(card),
					"card state: %s\n",
					rsxx_card_state_to_str(card->state));
				st = rsxx_get_card_size8(card, &card->size8);
				if (st)
					card->size8 = 0;
			}
		}
	} else if (card->state == CARD_STATE_GOOD ||
		   card->state == CARD_STATE_RD_ONLY_FAULT) {
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			card->size8 = 0;
	}

	rsxx_attach_dev(card);

	/************* Setup Debugfs *************/
	rsxx_debugfs_dev_new(card);

	return 0;

failed_create_dev:
	destroy_workqueue(card->event_wq);
	card->event_wq = NULL;
failed_event_handler:
	rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatibility_check:
	destroy_workqueue(card->creg_ctrl.creg_wq);
	card->creg_ctrl.creg_wq = NULL;
failed_creg_setup:
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);
	free_irq(dev->irq, card);
	if (!force_legacy)
		pci_disable_msi(dev);
failed_irq:
	pci_iounmap(dev, card->regmap);
failed_iomap:
	pci_release_regions(dev);
failed_request_regions:
failed_dma_mask:
	pci_disable_device(dev);
failed_enable:
	ida_free(&rsxx_disk_ida, card->disk_id);
failed_ida_get:
	kfree(card);

	return st;
}

static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	cancel_work_sync(&card->event_work);

	destroy_workqueue(card->event_wq);
	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);
	destroy_workqueue(card->creg_ctrl.creg_wq);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	debugfs_remove_recursive(card->debugfs_dir);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);

	pci_iounmap(dev, card->regmap);

	pci_disable_device(dev);
	pci_release_regions(dev);

	ida_free(&rsxx_disk_ida, card->disk_id);
	kfree(card);
}

static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}

static void rsxx_pci_shutdown(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	card_shutdown(card);
}

static const struct pci_error_handlers rsxx_err_handler = {
	.error_detected	= rsxx_error_detected,
	.slot_reset	= rsxx_slot_reset,
};

static const struct pci_device_id rsxx_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);

static struct pci_driver rsxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= rsxx_pci_ids,
	.probe		= rsxx_pci_probe,
	.remove		= rsxx_pci_remove,
	.suspend	= rsxx_pci_suspend,
	.shutdown	= rsxx_pci_shutdown,
	.err_handler	= &rsxx_err_handler,
};

static int __init rsxx_core_init(void)
{
	int st;

	st = rsxx_dev_init();
	if (st)
		return st;

	st = rsxx_dma_init();
	if (st)
		goto dma_init_failed;

	st = rsxx_creg_init();
	if (st)
		goto creg_init_failed;

	return pci_register_driver(&rsxx_pci_driver);

creg_init_failed:
	rsxx_dma_cleanup();
dma_init_failed:
	rsxx_dev_cleanup();

	return st;
}

static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}

module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);