/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/efi.h>
#include <asm/string.h>
#include <scsi/scsi_host.h>
#include "host.h"
#include "isci.h"
#include "task.h"
#include "probe_roms.h"

#define MAJ 1
#define MIN 2
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD)

MODULE_VERSION(DRV_VERSION);

static struct scsi_transport_template *isci_transport_template;

static const struct pci_device_id isci_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x1D61),},
	{ PCI_VDEVICE(INTEL, 0x1D63),},
	{ PCI_VDEVICE(INTEL, 0x1D65),},
	{ PCI_VDEVICE(INTEL, 0x1D67),},
	{ PCI_VDEVICE(INTEL, 0x1D69),},
	{ PCI_VDEVICE(INTEL, 0x1D6B),},
	{ PCI_VDEVICE(INTEL, 0x1D60),},
	{ PCI_VDEVICE(INTEL, 0x1D62),},
	{ PCI_VDEVICE(INTEL, 0x1D64),},
	{ PCI_VDEVICE(INTEL, 0x1D66),},
	{ PCI_VDEVICE(INTEL, 0x1D68),},
	{ PCI_VDEVICE(INTEL, 0x1D6A),},
	{}
};

MODULE_DEVICE_TABLE(pci, isci_id_table);

/* linux isci specific settings */

unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");

u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");

u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");

u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");

u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");

unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");

unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");

uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
module_param(cable_selection_override, uint, 0);

MODULE_PARM_DESC(cable_selection_override,
		 "Length selection for the SAS/SATA cable between host and "
		 "device. If any bits above 15 are set (the default), "
		 "platform defaults are used.");
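
/*
 * Illustrative note (not part of the original sources): these parameters
 * are declared with permissions 0, so they are not writable through sysfs
 * at runtime; most of them are consumed during controller probe via
 * isci_user_parameters_get() below.  They would therefore normally be
 * supplied at module load time, for example (arbitrary example values):
 *
 *   modprobe isci phy_gen=2 ssp_inactive_to=5 max_concurr_spinup=1
 */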
133*4882a593Smuzhiyun 
isci_show_id(struct device * dev,struct device_attribute * attr,char * buf)134*4882a593Smuzhiyun static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun 	struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
137*4882a593Smuzhiyun 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
138*4882a593Smuzhiyun 	struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
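
/*
 * Because isci_host_attrs is wired into isci_sht.shost_attrs below, this
 * read-only (S_IRUGO) attribute typically appears as
 * /sys/class/scsi_host/host<N>/isci_id and reports which SCU controller
 * instance (ihost->id) backs that Scsi_Host.
 */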

static struct device_attribute *isci_host_attrs[] = {
	&dev_attr_isci_id,
	NULL
};

static struct scsi_host_template isci_sht = {

	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.proc_name			= DRV_NAME,
	.queuecommand			= sas_queuecommand,
	.dma_need_drain			= ata_scsi_dma_need_drain,
	.target_alloc			= sas_target_alloc,
	.slave_configure		= sas_slave_configure,
	.scan_finished			= isci_host_scan_finished,
	.scan_start			= isci_host_start,
	.change_queue_depth		= sas_change_queue_depth,
	.bios_param			= sas_bios_param,
	.can_queue			= ISCI_CAN_QUEUE_VAL,
	.this_id			= -1,
	.sg_tablesize			= SG_ALL,
	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
	.eh_abort_handler		= sas_eh_abort_handler,
	.eh_device_reset_handler        = sas_eh_device_reset_handler,
	.eh_target_reset_handler        = sas_eh_target_reset_handler,
	.slave_alloc			= sas_slave_alloc,
	.target_destroy			= sas_target_destroy,
	.ioctl				= sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl			= sas_ioctl,
#endif
	.shost_attrs			= isci_host_attrs,
	.track_queue_depth		= 1,
};

static struct sas_domain_function_template isci_transport_ops  = {

	/* The class calls these to notify the LLDD of an event. */
	.lldd_port_formed	= isci_port_formed,
	.lldd_port_deformed	= isci_port_deformed,

	/* The class calls these when a device is found or gone. */
	.lldd_dev_found		= isci_remote_device_found,
	.lldd_dev_gone		= isci_remote_device_gone,

	.lldd_execute_task	= isci_task_execute_task,
	/* Task Management Functions. Must be called from process context. */
	.lldd_abort_task	= isci_task_abort_task,
	.lldd_abort_task_set	= isci_task_abort_task_set,
	.lldd_clear_aca		= isci_task_clear_aca,
	.lldd_clear_task_set	= isci_task_clear_task_set,
	.lldd_I_T_nexus_reset	= isci_task_I_T_nexus_reset,
	.lldd_lu_reset		= isci_task_lu_reset,
	.lldd_query_task	= isci_task_query_task,

	/* ata recovery called from ata-eh */
	.lldd_ata_check_ready	= isci_ata_check_ready,

	/* Port and Adapter management */
	.lldd_clear_nexus_port	= isci_task_clear_nexus_port,
	.lldd_clear_nexus_ha	= isci_task_clear_nexus_ha,

	/* Phy management */
	.lldd_control_phy	= isci_phy_control,

	/* GPIO support */
	.lldd_write_gpio	= isci_gpio_write,
};


/******************************************************************************
* P R O T E C T E D  M E T H O D S
******************************************************************************/


/**
 * isci_register_sas_ha() - Initialize the lldd-specific members of the
 *    sas_ha struct and register the host adapter with libsas via
 *    sas_register_ha().
 * @isci_host: the lldd-specific wrapper for the libsas sas_ha struct.
 *
 * Return: 0 on success, or -ENOMEM if the phy/port pointer arrays cannot
 * be allocated.
 */
static int isci_register_sas_ha(struct isci_host *isci_host)
{
	int i;
	struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
	struct asd_sas_phy **sas_phys;
	struct asd_sas_port **sas_ports;

	sas_phys = devm_kcalloc(&isci_host->pdev->dev,
				SCI_MAX_PHYS, sizeof(void *),
				GFP_KERNEL);
	if (!sas_phys)
		return -ENOMEM;

	sas_ports = devm_kcalloc(&isci_host->pdev->dev,
				 SCI_MAX_PORTS, sizeof(void *),
				 GFP_KERNEL);
	if (!sas_ports)
		return -ENOMEM;

	sas_ha->sas_ha_name = DRV_NAME;
	sas_ha->lldd_module = THIS_MODULE;
	sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		sas_phys[i] = &isci_host->phys[i].sas_phy;
		sas_ports[i] = &isci_host->sas_ports[i];
	}

	sas_ha->sas_phy  = sas_phys;
	sas_ha->sas_port = sas_ports;
	sas_ha->num_phys = SCI_MAX_PHYS;

	sas_ha->strict_wide_ports = 1;

	sas_register_ha(sas_ha);

	return 0;
}

static void isci_unregister(struct isci_host *isci_host)
{
	struct Scsi_Host *shost;

	if (!isci_host)
		return;

	shost = to_shost(isci_host);
	sas_unregister_ha(&isci_host->sas_ha);

	sas_remove_host(shost);
	scsi_host_put(shost);
}

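/*
 * isci_pci_init() maps the device BARs and sets the DMA mask.  The
 * pcim_iomap_regions() mask is built as 1 << (bar_num * 2), i.e. only
 * even BAR indices are requested -- consistent with 64-bit BARs, which
 * each occupy two consecutive 32-bit BAR slots.  (This is an inference
 * from the mask arithmetic below, not stated elsewhere in this file.)
 */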
static int isci_pci_init(struct pci_dev *pdev)
{
	int err, bar_num, bar_mask = 0;
	void __iomem * const *iomap;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"failed to enable PCI device %s!\n",
			pci_name(pdev));
		return err;
	}

	for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
		bar_mask |= 1 << (bar_num * 2);

	err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
	if (err)
		return err;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}

static int num_controllers(struct pci_dev *pdev)
{
	/* The BAR size alone can tell us if we are running with a dual
	 * controller part; no need to trust revision ids that might be
	 * under broken firmware control.
	 */
	resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
	resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);

	if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
	    smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
		return SCI_MAX_CONTROLLERS;
	else
		return 1;
}

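/*
 * Interrupt layout used by isci_setup_interrupts(): each controller on
 * this PCI function gets SCI_NUM_MSI_X_INT MSI-X vectors, with even
 * vectors routed to the normal completion ISR (isci_msix_isr) and odd
 * vectors to the error ISR (isci_error_isr).  If MSI-X allocation or any
 * request_irq call fails, everything already requested is torn down and
 * the driver falls back to a single shared legacy INTx handler
 * (isci_intx_isr) per host.
 */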
static int isci_setup_interrupts(struct pci_dev *pdev)
{
	int err, i, num_msix;
	struct isci_host *ihost;
	struct isci_pci_info *pci_info = to_pci_info(pdev);

	/*
	 *  Determine the number of vectors associated with this
	 *  PCI function.
	 */
	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;

	err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
	if (err < 0)
		goto intx;

	for (i = 0; i < num_msix; i++) {
		int id = i / SCI_NUM_MSI_X_INT;
		irq_handler_t isr;

		ihost = pci_info->hosts[id];
		/* odd numbered vectors are error interrupts */
		if (i & 1)
			isr = isci_error_isr;
		else
			isr = isci_msix_isr;

		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				isr, 0, DRV_NAME"-msix", ihost);
		if (!err)
			continue;

		dev_info(&pdev->dev, "msix setup failed, falling back to intx\n");
		while (i--) {
			id = i / SCI_NUM_MSI_X_INT;
			ihost = pci_info->hosts[id];
			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
					ihost);
		}
		pci_free_irq_vectors(pdev);
		goto intx;
	}
	return 0;

 intx:
	for_each_isci_host(i, ihost, pdev) {
		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
				isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
				ihost);
		if (err)
			break;
	}
	return err;
}

static void isci_user_parameters_get(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_concurr_spinup = max_concurr_spinup;
}

static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
					       struct sci_user_parameters *sci_parms)
{
	u16 index;

	/*
	 * Validate the user parameters.  If they are not legal, then
	 * return a failure.
	 */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		struct sci_phy_user_params *u;

		u = &sci_parms->phys[index];

		if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
		      (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		if ((u->in_connection_align_insertion_frequency < 3) ||
		    (u->align_insertion_frequency == 0) ||
		    (u->notify_enable_spin_up_insertion_frequency == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	if ((sci_parms->stp_inactivity_timeout == 0) ||
	    (sci_parms->ssp_inactivity_timeout == 0) ||
	    (sci_parms->stp_max_occupancy_timeout == 0) ||
	    (sci_parms->ssp_max_occupancy_timeout == 0) ||
	    (sci_parms->no_outbound_task_timeout == 0))
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));

	return SCI_SUCCESS;
}

static void sci_oem_defaults(struct isci_host *ihost)
{
	/* these defaults are overridden by the platform / firmware */
	struct sci_user_parameters *user = &ihost->user_parameters;
	struct sci_oem_params *oem = &ihost->oem_parameters;
	int i;

	/* Default to APC mode. */
	oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to one device spinning up at a time. */
	oem->controller.max_concurr_spin_up = 1;

	/* Default to no SSC operation. */
	oem->controller.do_enable_ssc = false;

	/* Default to short cables on all phys. */
	oem->controller.cable_selection_mask = 0;

	/* Initialize all of the port parameter information to narrow ports. */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		oem->ports[i].phy_mask = 0;

	/* Initialize all of the phy parameter information. */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Default to 3G (i.e. Gen 2). */
		user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;

		/* the frequencies cannot be 0 */
		user->phys[i].align_insertion_frequency = 0x7f;
		user->phys[i].in_connection_align_insertion_frequency = 0xff;
		user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;

		/* Previous Vitesse-based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of the SAS address
		 * be a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF.
		 */
		oem->phys[i].sas_address.low = 0x1 + ihost->id;
		oem->phys[i].sas_address.high = 0x5FCFFFFF;
	}

	user->stp_inactivity_timeout = 5;
	user->ssp_inactivity_timeout = 5;
	user->stp_max_occupancy_timeout = 5;
	user->ssp_max_occupancy_timeout = 20;
	user->no_outbound_task_timeout = 2;
}

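/*
 * isci_host_alloc() brings one SCU controller instance up to the point
 * where it can be scanned: the built-in defaults are applied first
 * (sci_oem_defaults), then the module parameters are folded in
 * (isci_user_parameters_get / sci_user_parameters_set), then any OEM
 * table found in the OROM/EFI image is validated and copied, and finally
 * the Scsi_Host is allocated, initialized (isci_host_init), added
 * (scsi_add_host) and registered with libsas (isci_register_sas_ha).
 * Any failure unwinds and returns NULL.
 */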
static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
{
	struct isci_orom *orom = to_pci_info(pdev)->orom;
	struct sci_user_parameters sci_user_params;
	u8 oem_version = ISCI_ROM_VER_1_0;
	struct isci_host *ihost;
	struct Scsi_Host *shost;
	int err, i;

	ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
	if (!ihost)
		return NULL;

	ihost->pdev = pdev;
	ihost->id = id;
	spin_lock_init(&ihost->scic_lock);
	init_waitqueue_head(&ihost->eventq);
	ihost->sas_ha.dev = &ihost->pdev->dev;
	ihost->sas_ha.lldd_ha = ihost;
	tasklet_init(&ihost->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)ihost);

	/* validate module parameters */
	/* TODO: kill struct sci_user_parameters and reference directly */
	sci_oem_defaults(ihost);
	isci_user_parameters_get(&sci_user_params);
	if (sci_user_parameters_set(ihost, &sci_user_params)) {
		dev_warn(&pdev->dev,
			 "%s: sci_user_parameters_set failed\n", __func__);
		return NULL;
	}

	/* sanity check platform (or 'firmware') oem parameters */
	if (orom) {
		if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
			dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
			return NULL;
		}
		ihost->oem_parameters = orom->ctrl[id];
		oem_version = orom->hdr.version;
	}

	/* validate oem parameters (platform, firmware, or built-in defaults) */
	if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
		dev_warn(&pdev->dev, "oem parameter validation failed\n");
		return NULL;
	}

	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];

		INIT_LIST_HEAD(&iport->remote_dev_list);
		iport->isci_host = ihost;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&ihost->phys[i], ihost, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &ihost->devices[i];

		INIT_LIST_HEAD(&idev->node);
	}

	shost = scsi_host_alloc(&isci_sht, sizeof(void *));
	if (!shost)
		return NULL;

	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
		 "{%s, %s, %s, %s}\n",
		 (is_cable_select_overridden() ? "* " : ""), ihost->id,
		 lookup_cable_names(decode_cable_selection(ihost, 3)),
		 lookup_cable_names(decode_cable_selection(ihost, 2)),
		 lookup_cable_names(decode_cable_selection(ihost, 1)),
		 lookup_cable_names(decode_cable_selection(ihost, 0)));

	err = isci_host_init(ihost);
	if (err)
		goto err_shost;

	SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
	ihost->sas_ha.core.shost = shost;
	shost->transportt = isci_transport_template;

	shost->max_id = ~0;
	shost->max_lun = ~0;
	shost->max_cmd_len = MAX_COMMAND_SIZE;

	/* turn on DIF support */
	scsi_host_set_prot(shost,
			   SHOST_DIF_TYPE1_PROTECTION |
			   SHOST_DIF_TYPE2_PROTECTION |
			   SHOST_DIF_TYPE3_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	err = scsi_add_host(shost, &pdev->dev);
	if (err)
		goto err_shost;

	err = isci_register_sas_ha(ihost);
	if (err)
		goto err_shost_remove;

	return ihost;

 err_shost_remove:
	scsi_remove_host(shost);
 err_shost:
	scsi_host_put(shost);

	return NULL;
}

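/*
 * OEM parameter discovery order in isci_pci_probe(): an EFI variable is
 * tried first (when the EFI get-variable runtime service is available,
 * via isci_get_efi_var), then the PCI option ROM (isci_request_oprom),
 * then a firmware image (isci_request_firmware), and finally the
 * built-in defaults from sci_oem_defaults() stand if none of those
 * yields a valid parameter set.
 */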
static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct isci_pci_info *pci_info;
	int err, i;
	struct isci_host *isci_host;
	const struct firmware *fw = NULL;
	struct isci_orom *orom = NULL;
	char *source = "(platform)";

	dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
		 pdev->revision);

	pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;
	pci_set_drvdata(pdev, pci_info);

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
		orom = isci_get_efi_var(pdev);

	if (!orom)
		orom = isci_request_oprom(pdev);

	for (i = 0; orom && i < num_controllers(pdev); i++) {
		if (sci_oem_parameters_validate(&orom->ctrl[i],
						orom->hdr.version)) {
			dev_warn(&pdev->dev,
				 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
			orom = NULL;
			break;
		}
	}

	if (!orom) {
		source = "(firmware)";
		orom = isci_request_firmware(pdev, fw);
		if (!orom) {
			/* TODO convert this to WARN_TAINT_ONCE once the
			 * orom/efi parameter support is widely available
			 */
			dev_warn(&pdev->dev,
				 "Loading user firmware failed, using default "
				 "values\n");
			dev_warn(&pdev->dev,
				 "Default OEM configuration being used: 4 "
				 "narrow ports, and default SAS Addresses\n");
		}
	}

	if (orom)
		dev_info(&pdev->dev,
			 "OEM SAS parameters (version: %u.%u) loaded %s\n",
			 (orom->hdr.version & 0xf0) >> 4,
			 (orom->hdr.version & 0xf), source);

	pci_info->orom = orom;

	err = isci_pci_init(pdev);
	if (err)
		return err;

	for (i = 0; i < num_controllers(pdev); i++) {
		struct isci_host *h = isci_host_alloc(pdev, i);

		if (!h) {
			err = -ENOMEM;
			goto err_host_alloc;
		}
		pci_info->hosts[i] = h;
	}

	err = isci_setup_interrupts(pdev);
	if (err)
		goto err_host_alloc;

	for_each_isci_host(i, isci_host, pdev)
		scsi_scan_host(to_shost(isci_host));

	return 0;

 err_host_alloc:
	for_each_isci_host(i, isci_host, pdev)
		isci_unregister(isci_host);
	return err;
}

static void isci_pci_remove(struct pci_dev *pdev)
{
	struct isci_host *ihost;
	int i;

	for_each_isci_host(i, ihost, pdev) {
		wait_for_start(ihost);
		isci_unregister(ihost);
		isci_host_deinit(ihost);
	}
}

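/*
 * Power management: suspend quiesces each host through libsas
 * (sas_suspend_ha), tears down controller state (isci_host_deinit) and
 * puts the PCI device in D3hot; resume restores PCI state, re-runs
 * isci_host_init()/isci_host_start() and then lets libsas resume the
 * domain (sas_resume_ha).
 */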
#ifdef CONFIG_PM_SLEEP
static int isci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct isci_host *ihost;
	int i;

	for_each_isci_host(i, ihost, pdev) {
		sas_suspend_ha(&ihost->sas_ha);
		isci_host_deinit(ihost);
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int isci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct isci_host *ihost;
	int rc, i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);

	for_each_isci_host(i, ihost, pdev) {
		sas_prep_resume_ha(&ihost->sas_ha);

		isci_host_init(ihost);
		isci_host_start(ihost->sas_ha.core.shost);
		wait_for_start(ihost);

		sas_resume_ha(&ihost->sas_ha);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);

static struct pci_driver isci_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= isci_id_table,
	.probe		= isci_pci_probe,
	.remove		= isci_pci_remove,
	.driver.pm      = &isci_pm_ops,
};

static __init int isci_init(void)
{
	int err;

	pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
		DRV_NAME, DRV_VERSION);

	isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
	if (!isci_transport_template)
		return -ENOMEM;

	err = pci_register_driver(&isci_pci_driver);
	if (err)
		sas_release_transport(isci_transport_template);

	return err;
}

static __exit void isci_exit(void)
{
	pci_unregister_driver(&isci_pci_driver);
	sas_release_transport(isci_transport_template);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(ISCI_FW_NAME);
module_init(isci_init);
module_exit(isci_exit);