// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */


#include "mv_sas.h"

int interrupt_coalescing = 0x80;

static struct scsi_transport_template *mvs_stt;
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] = { 1, 2, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
	[chip_6440] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
	[chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
	[chip_9180] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
	[chip_9480] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
	[chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
	[chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
	[chip_1300] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
	[chip_1320] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
};

static struct device_attribute *mvst_host_attrs[];

#define SOC_SAS_NUM 2

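/*
 * SCSI host template shared by all supported controllers.  Command and
 * error handling callbacks come from libsas; mvsas itself only supplies
 * the scan hooks and the host attributes declared below.
 */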
static struct scsi_host_template mvs_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = sas_queuecommand,
	.dma_need_drain = ata_scsi_dma_need_drain,
	.target_alloc = sas_target_alloc,
	.slave_configure = sas_slave_configure,
	.scan_finished = mvs_scan_finished,
	.scan_start = mvs_scan_start,
	.change_queue_depth = sas_change_queue_depth,
	.bios_param = sas_bios_param,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.slave_alloc = sas_slave_alloc,
	.target_destroy = sas_target_destroy,
	.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sas_ioctl,
#endif
	.shost_attrs = mvst_host_attrs,
	.track_queue_depth = 1,
};

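/*
 * libsas low-level driver entry points: device discovery, task
 * execution, task management and error handling, port events and
 * GPIO writes.
 */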
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_dev_found = mvs_dev_found,
	.lldd_dev_gone = mvs_dev_gone,
	.lldd_execute_task = mvs_queue_command,
	.lldd_control_phy = mvs_phy_control,

	.lldd_abort_task = mvs_abort_task,
	.lldd_abort_task_set = mvs_abort_task_set,
	.lldd_clear_aca = mvs_clear_aca,
	.lldd_clear_task_set = mvs_clear_task_set,
	.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
	.lldd_lu_reset = mvs_lu_reset,
	.lldd_query_task = mvs_query_task,
	.lldd_port_formed = mvs_port_formed,
	.lldd_port_deformed = mvs_port_deformed,

	.lldd_write_gpio = mvs_gpio_write,

};

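/*
 * Initialize one phy's driver state and its libsas asd_sas_phy.  Only
 * phys that actually exist on the chip (phy_id < n_phy) are marked
 * enabled.
 */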
static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->mvi = mvi;
	phy->port = NULL;
	timer_setup(&phy->timer, NULL, 0);
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
	sas_phy->lldd_phy = phy;
}

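/*
 * Free one host core: DMA pool, coherent rings and buffers, register
 * mappings, pending delayed work, the tag bitmap, and finally the
 * Scsi_Host reference and the mvs_info itself.  Each resource is
 * checked individually, so a partially initialized core is handled.
 */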
static void mvs_free(struct mvs_info *mvi)
{
	struct mvs_wq *mwq;
	int slot_nr;

	if (!mvi)
		return;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_CHIP_SLOT_SZ;

	dma_pool_destroy(mvi->dma_pool);

	if (mvi->tx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->slot) * slot_nr,
				  mvi->slot, mvi->slot_dma);

	if (mvi->bulk_buffer)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
	if (mvi->bulk_buffer1)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer1, mvi->bulk_buffer_dma1);

	MVS_CHIP_DISP->chip_iounmap(mvi);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work(&mwq->work_q);
	kfree(mvi->tags);
	kfree(mvi);
}

#ifdef CONFIG_SCSI_MVSAS_TASKLET
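/*
 * Deferred interrupt handling: mvs_interrupt() masks the chip and
 * schedules this tasklet, which reads the interrupt status, runs the
 * ISR for every host core sharing the line, and then re-enables the
 * chip's interrupts.
 */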
static void mvs_tasklet(unsigned long opaque)
{
	u32 stat;
	u16 core_nr, i = 0;

	struct mvs_info *mvi;
	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		BUG_ON(1);

	stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
	if (!stat)
		goto out;

	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
	}
out:
	MVS_CHIP_DISP->interrupt_enable(mvi);

}
#endif

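/*
 * Top-half IRQ handler.  With CONFIG_SCSI_MVSAS_TASKLET the chip is
 * masked and the work is deferred to mvs_tasklet(); otherwise every
 * host core is serviced directly in interrupt context.
 */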
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	u32 stat;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = opaque;
#ifndef CONFIG_SCSI_MVSAS_TASKLET
	u32 i;
	u32 core_nr;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
#endif

	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		return IRQ_NONE;
#ifdef CONFIG_SCSI_MVSAS_TASKLET
	MVS_CHIP_DISP->interrupt_disable(mvi);
#endif

	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
	if (!stat) {
#ifdef CONFIG_SCSI_MVSAS_TASKLET
		MVS_CHIP_DISP->interrupt_enable(mvi);
#endif
		return IRQ_NONE;
	}

#ifdef CONFIG_SCSI_MVSAS_TASKLET
	tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#else
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, irq, stat);
	}
#endif
	return IRQ_HANDLED;
}

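/*
 * Allocate the per-core DMA rings (TX/RX descriptor rings, received FIS
 * area, slot table, trash buckets) and the command-slot DMA pool, then
 * initialize phy, port, device and tag bookkeeping.  Returns 0 on
 * success and 1 on any allocation failure; the caller cleans up with
 * mvs_free().
 */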
static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
	int i = 0, slot_nr;
	char pool_name[32];

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_CHIP_SLOT_SZ;

	spin_lock_init(&mvi->lock);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_phy_init(mvi, i);
		mvi->port[i].wide_port_phymap = 0;
		mvi->port[i].port_attached = 0;
		INIT_LIST_HEAD(&mvi->port[i].list);
	}
	for (i = 0; i < MVS_MAX_DEVICES; i++) {
		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
		mvi->devices[i].dev_type = SAS_PHY_UNUSED;
		mvi->devices[i].device_id = i;
		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
	}

	/*
	 * alloc and init our DMA areas
	 */
	mvi->tx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;

	mvi->rx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(mvi->dev,
				       sizeof(*mvi->slot) * slot_nr,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;

	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
					      TRASH_BUCKET_SIZE,
					      &mvi->bulk_buffer_dma, GFP_KERNEL);
	if (!mvi->bulk_buffer)
		goto err_out;

	mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
					       TRASH_BUCKET_SIZE,
					       &mvi->bulk_buffer_dma1, GFP_KERNEL);
	if (!mvi->bulk_buffer1)
		goto err_out;

	sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
	mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
					MVS_SLOT_BUF_SZ, 16, 0);
	if (!mvi->dma_pool) {
		printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
		goto err_out;
	}
	mvi->tags_num = slot_nr;

	/* Initialize tags */
	mvs_tag_init(mvi);
	return 0;
err_out:
	return 1;
}

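/*
 * Map the controller's main register BAR and, if bar_ex is not -1, the
 * extended/peripheral register BAR.  Returns 0 on success or -1 on
 * failure, unwinding the extended mapping if the main BAR cannot be
 * mapped.
 */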
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
	unsigned long res_start, res_len, res_flag_ex = 0;
	struct pci_dev *pdev = mvi->pdev;
	if (bar_ex != -1) {
		/*
		 * ioremap main and peripheral registers
		 */
		res_start = pci_resource_start(pdev, bar_ex);
		res_len = pci_resource_len(pdev, bar_ex);
		if (!res_start || !res_len)
			goto err_out;

		res_flag_ex = pci_resource_flags(pdev, bar_ex);
		if (res_flag_ex & IORESOURCE_MEM)
			mvi->regs_ex = ioremap(res_start, res_len);
		else
			mvi->regs_ex = (void *)res_start;
		if (!mvi->regs_ex)
			goto err_out;
	}

	res_start = pci_resource_start(pdev, bar);
	res_len = pci_resource_len(pdev, bar);
	if (!res_start || !res_len) {
		iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
		goto err_out;
	}

	mvi->regs = ioremap(res_start, res_len);

	if (!mvi->regs) {
		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
			iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
		goto err_out;
	}

	return 0;
err_out:
	return -1;
}

void mvs_iounmap(void __iomem *regs)
{
	iounmap(regs);
}

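/*
 * Allocate and set up one mvs_info host core: chip description, tag
 * bitmap, register mappings and DMA resources.  Returns the new core on
 * success or NULL on failure, after freeing whatever was allocated.
 */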
static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
				      const struct pci_device_id *ent,
				      struct Scsi_Host *shost, unsigned int id)
{
	struct mvs_info *mvi = NULL;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	mvi = kzalloc(sizeof(*mvi) +
		(1L << mvs_chips[ent->driver_data].slot_width) *
		sizeof(struct mvs_slot_info), GFP_KERNEL);
	if (!mvi)
		return NULL;

	mvi->pdev = pdev;
	mvi->dev = &pdev->dev;
	mvi->chip_id = ent->driver_data;
	mvi->chip = &mvs_chips[mvi->chip_id];
	INIT_LIST_HEAD(&mvi->wq_list);

	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

	mvi->id = id;
	mvi->sas = sha;
	mvi->shost = shost;

	mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
	if (!mvi->tags)
		goto err_out;

	if (MVS_CHIP_DISP->chip_ioremap(mvi))
		goto err_out;
	if (!mvs_alloc(mvi, shost))
		return mvi;
err_out:
	mvs_free(mvi);
	return NULL;
}

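/*
 * Prefer a 64-bit DMA mask and fall back to 32-bit if the device or
 * platform cannot support it.
 */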
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

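/*
 * First stage of SAS HA setup: allocate the per-HA phy/port pointer
 * arrays and the driver-private mvs_prv_info, and fill in the generic
 * Scsi_Host limits.  The arrays are wired to the individual cores later
 * in mvs_post_sas_ha_init().
 */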
static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
				const struct mvs_chip_info *chip_info)
{
	int phy_nr, port_nr; unsigned short core_nr;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = chip_info->n_host;
	phy_nr = core_nr * chip_info->n_phy;
	port_nr = phy_nr;

	memset(sha, 0x00, sizeof(struct sas_ha_struct));
	arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		goto exit_free;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;

	sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
	if (!sha->lldd_ha)
		goto exit_free;

	((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;

	shost->transportt = mvs_stt;
	shost->max_id = MVS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;

	return 0;
exit_free:
	kfree(arr_phy);
	kfree(arr_port);
	return -1;
}

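/*
 * Second stage of SAS HA setup, run after every core has been
 * allocated: point the HA's phy/port arrays at the per-core structures
 * and finalize the queue depth and scatter-gather limits on the
 * Scsi_Host.
 */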
static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
				 const struct mvs_chip_info *chip_info)
{
	int can_queue, i = 0, j = 0;
	struct mvs_info *mvi = NULL;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < nr_core; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < chip_info->n_phy; i++) {
			sha->sas_phy[j * chip_info->n_phy + i] =
				&mvi->phy[i].sas_phy;
			sha->sas_port[j * chip_info->n_phy + i] =
				&mvi->port[i].sas_port;
		}
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = mvi->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &mvi->sas_addr[0];

	sha->num_phys = nr_core * chip_info->n_phy;

	if (mvi->flags & MVF_FLAG_SOC)
		can_queue = MVS_SOC_CAN_QUEUE;
	else
		can_queue = MVS_CHIP_SLOT_SZ;

	shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
	shost->can_queue = can_queue;
	mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
	sha->core.shost = mvi->shost;
}

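/*
 * Program a fixed default SAS address on every phy and copy it into the
 * host's SAS address.  Nothing here reads the address from hardware or
 * firmware; the constant below is the driver's built-in default.
 */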
static void mvs_init_sas_add(struct mvs_info *mvi)
{
	u8 i;
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
		mvi->phy[i].dev_sas_addr =
			cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
	}

	memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}

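/*
 * PCI probe: enable the device, claim its regions, set the DMA mask,
 * allocate the Scsi_Host and one mvs_info per host core, initialize
 * each chip, then register with the SCSI midlayer and libsas before
 * requesting the shared interrupt and starting the scan.
 */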
static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int rc, nhost = 0;
	struct mvs_info *mvi;
	struct mvs_prv_info *mpi;
	irq_handler_t irq_handler = mvs_interrupt;
	struct Scsi_Host *shost = NULL;
	const struct mvs_chip_info *chip;

	dev_printk(KERN_INFO, &pdev->dev,
		   "mvsas: driver version %s\n", DRV_VERSION);
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_enable;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	rc = pci_go_64(pdev);
	if (rc)
		goto err_out_regions;

	shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	chip = &mvs_chips[ent->driver_data];
	SHOST_TO_SAS_HA(shost) =
		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
	if (!SHOST_TO_SAS_HA(shost)) {
		scsi_host_put(shost);
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = mvs_prep_sas_ha_init(shost, chip);
	if (rc) {
		scsi_host_put(shost);
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));

	do {
		mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
		if (!mvi) {
			rc = -ENOMEM;
			goto err_out_regions;
		}

		memset(&mvi->hba_info_param, 0xFF,
		       sizeof(struct hba_info_page));

		mvs_init_sas_add(mvi);

		mvi->instance = nhost;
		rc = MVS_CHIP_DISP->chip_init(mvi);
		if (rc) {
			mvs_free(mvi);
			goto err_out_regions;
		}
		nhost++;
	} while (nhost < chip->n_host);
	mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
	tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
		     (unsigned long)SHOST_TO_SAS_HA(shost));
#endif

	mvs_post_sas_ha_init(shost, chip);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_shost;

	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_out_shost;
	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
			 DRV_NAME, SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_not_sas;

	MVS_CHIP_DISP->interrupt_enable(mvi);

	scsi_scan_host(mvi->shost);

	return 0;

err_not_sas:
	sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
	scsi_remove_host(mvi->shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_enable:
	return rc;
}

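/*
 * PCI remove: tear down in roughly the reverse order of probe.  Kill
 * the tasklet, unregister from libsas and the SCSI midlayer, disable
 * interrupts and free the IRQ, release every core, and return the PCI
 * resources.
 */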
static void mvs_pci_remove(struct pci_dev *pdev)
{
	unsigned short core_nr, i = 0;
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct mvs_info *mvi = NULL;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef CONFIG_SCSI_MVSAS_TASKLET
	tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif

	sas_unregister_ha(sha);
	sas_remove_host(mvi->shost);

	MVS_CHIP_DISP->interrupt_disable(mvi);
	free_irq(mvi->pdev->irq, sha);
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		mvs_free(mvi);
	}
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	return;
}

static struct pci_device_id mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		.vendor = PCI_VENDOR_ID_MARVELL,
		.device = 0x6440,
		.subvendor = PCI_ANY_ID,
		.subdevice = 0x6480,
		.class = 0,
		.class_mask = 0,
		.driver_data = chip_6485,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
	{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
	{ PCI_VDEVICE(TTI, 0x2640), chip_6440 },
	{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2722), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2740), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2744), chip_9480 },
	{ PCI_VDEVICE(TTI, 0x2760), chip_9480 },
	{
		.vendor = PCI_VENDOR_ID_MARVELL_EXT,
		.device = 0x9480,
		.subvendor = PCI_ANY_ID,
		.subdevice = 0x9480,
		.class = 0,
		.class_mask = 0,
		.driver_data = chip_9480,
	},
	{
		.vendor = PCI_VENDOR_ID_MARVELL_EXT,
		.device = 0x9445,
		.subvendor = PCI_ANY_ID,
		.subdevice = 0x9480,
		.class = 0,
		.class_mask = 0,
		.driver_data = chip_9445,
	},
	{ PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */
	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
	{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1041), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1042), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1043), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1044), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1080), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1083), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
	{ PCI_VDEVICE(OCZ, 0x1084), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */

	{ }	/* terminate list */
};

static struct pci_driver mvs_pci_driver = {
	.name = DRV_NAME,
	.id_table = mvs_pci_table,
	.probe = mvs_pci_init,
	.remove = mvs_pci_remove,
};

static ssize_t
mvs_show_driver_version(struct device *cdev,
			struct device_attribute *attr, char *buffer)
{
	return sysfs_emit(buffer, "%s\n", DRV_VERSION);
}

static DEVICE_ATTR(driver_version,
		   S_IRUGO,
		   mvs_show_driver_version,
		   NULL);

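/*
 * sysfs store hook for the module-wide interrupt coalescing timer
 * (microseconds).  Values of 0x10000 or more are ignored with only a
 * debug message; accepted values are pushed to every host core through
 * the chip dispatch table.
 */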
static ssize_t
mvs_store_interrupt_coalescing(struct device *cdev,
			       struct device_attribute *attr,
			       const char *buffer, size_t size)
{
	unsigned int val = 0;
	struct mvs_info *mvi = NULL;
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	u8 i, core_nr;
	if (buffer == NULL)
		return size;

	if (sscanf(buffer, "%u", &val) != 1)
		return -EINVAL;

	if (val >= 0x10000) {
		mv_dprintk("interrupt coalescing timer %d us is too long\n",
			   val);
		return strlen(buffer);
	}

	interrupt_coalescing = val;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		return -EINVAL;

	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		if (MVS_CHIP_DISP->tune_interrupt)
			MVS_CHIP_DISP->tune_interrupt(mvi,
				interrupt_coalescing);
	}
	mv_dprintk("set interrupt coalescing time to %d us\n",
		   interrupt_coalescing);
	return strlen(buffer);
}

static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
					     struct device_attribute *attr,
					     char *buffer)
{
	return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}

static DEVICE_ATTR(interrupt_coalescing,
		   S_IRUGO|S_IWUSR,
		   mvs_show_interrupt_coalescing,
		   mvs_store_interrupt_coalescing);

static int __init mvs_init(void)
{
	int rc;
	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
	if (!mvs_stt)
		return -ENOMEM;

	rc = pci_register_driver(&mvs_pci_driver);
	if (rc)
		goto err_out;

	return 0;

err_out:
	sas_release_transport(mvs_stt);
	return rc;
}

static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}

static struct device_attribute *mvst_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_interrupt_coalescing,
	NULL,
};

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
#endif