xref: /OK3568_Linux_fs/kernel/drivers/scsi/sym53c8xx_2/sym_glue.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
4*4882a593Smuzhiyun  * of PCI-SCSI IO processors.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
7*4882a593Smuzhiyun  * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This driver is derived from the Linux sym53c8xx driver.
10*4882a593Smuzhiyun  * Copyright (C) 1998-2000  Gerard Roudier
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
13*4882a593Smuzhiyun  * a port of the FreeBSD ncr driver to Linux-1.2.13.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * The original ncr driver has been written for 386bsd and FreeBSD by
16*4882a593Smuzhiyun  *         Wolfgang Stanglmeier        <wolf@cologne.de>
17*4882a593Smuzhiyun  *         Stefan Esser                <se@mi.Uni-Koeln.de>
18*4882a593Smuzhiyun  * Copyright (C) 1994  Wolfgang Stanglmeier
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * Other major contributions:
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * NVRAM detection and reading.
23*4882a593Smuzhiyun  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  *-----------------------------------------------------------------------------
26*4882a593Smuzhiyun  */
27*4882a593Smuzhiyun #include <linux/ctype.h>
28*4882a593Smuzhiyun #include <linux/init.h>
29*4882a593Smuzhiyun #include <linux/module.h>
30*4882a593Smuzhiyun #include <linux/moduleparam.h>
31*4882a593Smuzhiyun #include <linux/spinlock.h>
32*4882a593Smuzhiyun #include <scsi/scsi.h>
33*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
34*4882a593Smuzhiyun #include <scsi/scsi_device.h>
35*4882a593Smuzhiyun #include <scsi/scsi_transport.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include "sym_glue.h"
38*4882a593Smuzhiyun #include "sym_nvram.h"
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define NAME53C		"sym53c"
41*4882a593Smuzhiyun #define NAME53C8XX	"sym53c8xx"
42*4882a593Smuzhiyun 
/* Driver-wide tunables; compile-time defaults, overridable at module load
 * via the parameters declared below. */
struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
/* Debugging bitmask, set via the "debug=" parameter (see DEBUG_FLAGS users). */
unsigned int sym_debug_flags = 0;

/* Raw strings for the "excl" and "safe" parameters; parsed at init time
 * by sym2_setup_params(). */
static char *excl_string;
static char *safe_string;
module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
module_param_named(verb, sym_driver_setup.verbose, byte, 0);
module_param_named(debug, sym_debug_flags, uint, 0);
module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
module_param_named(excl, excl_string, charp, 0);
module_param_named(safe, safe_string, charp, 0);

MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
MODULE_PARM_DESC(burst, "Maximum burst.  0 to disable, 255 to read from registers");
MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
MODULE_PARM_DESC(debug, "Set bits to enable debugging");
MODULE_PARM_DESC(settle, "Settle delay in seconds.  Default 3");
MODULE_PARM_DESC(nvram, "Option currently not used");
MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
80*4882a593Smuzhiyun 
sym2_setup_params(void)81*4882a593Smuzhiyun static void sym2_setup_params(void)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	char *p = excl_string;
84*4882a593Smuzhiyun 	int xi = 0;
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	while (p && (xi < 8)) {
87*4882a593Smuzhiyun 		char *next_p;
88*4882a593Smuzhiyun 		int val = (int) simple_strtoul(p, &next_p, 0);
89*4882a593Smuzhiyun 		sym_driver_setup.excludes[xi++] = val;
90*4882a593Smuzhiyun 		p = next_p;
91*4882a593Smuzhiyun 	}
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	if (safe_string) {
94*4882a593Smuzhiyun 		if (*safe_string == 'y') {
95*4882a593Smuzhiyun 			sym_driver_setup.max_tag = 0;
96*4882a593Smuzhiyun 			sym_driver_setup.burst_order = 0;
97*4882a593Smuzhiyun 			sym_driver_setup.scsi_led = 0;
98*4882a593Smuzhiyun 			sym_driver_setup.scsi_diff = 1;
99*4882a593Smuzhiyun 			sym_driver_setup.irq_mode = 0;
100*4882a593Smuzhiyun 			sym_driver_setup.scsi_bus_check = 2;
101*4882a593Smuzhiyun 			sym_driver_setup.host_id = 7;
102*4882a593Smuzhiyun 			sym_driver_setup.verbose = 2;
103*4882a593Smuzhiyun 			sym_driver_setup.settle_delay = 10;
104*4882a593Smuzhiyun 			sym_driver_setup.use_nvram = 1;
105*4882a593Smuzhiyun 		} else if (*safe_string != 'n') {
106*4882a593Smuzhiyun 			printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s"
107*4882a593Smuzhiyun 					" passed to safe option", safe_string);
108*4882a593Smuzhiyun 		}
109*4882a593Smuzhiyun 	}
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun static struct scsi_transport_template *sym2_transport_template = NULL;
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /*
115*4882a593Smuzhiyun  *  Driver private area in the SCSI command structure.
116*4882a593Smuzhiyun  */
struct sym_ucmd {		/* Override the SCSI pointer structure */
	struct completion *eh_done;		/* SCSI error handling */
};

/* Per-command private data is stored inside the midlayer's cmd->SCp area;
 * see the BUILD_BUG_ON() in sym_xpt_done() that checks it fits. */
#define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun /*
125*4882a593Smuzhiyun  *  Complete a pending CAM CCB.
126*4882a593Smuzhiyun  */
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);

	/* Our private per-command data must fit inside cmd->SCp. */
	BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));

	/* Wake an error-handler thread waiting on this command, if any
	 * (see sym_eh_handler()). */
	if (ucmd->eh_done)
		complete(ucmd->eh_done);

	scsi_dma_unmap(cmd);
	cmd->scsi_done(cmd);
}
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun /*
140*4882a593Smuzhiyun  *  Tell the SCSI layer about a BUS RESET.
141*4882a593Smuzhiyun  */
sym_xpt_async_bus_reset(struct sym_hcb * np)142*4882a593Smuzhiyun void sym_xpt_async_bus_reset(struct sym_hcb *np)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
145*4882a593Smuzhiyun 	np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
146*4882a593Smuzhiyun 	np->s.settle_time_valid = 1;
147*4882a593Smuzhiyun 	if (sym_verbose >= 2)
148*4882a593Smuzhiyun 		printf_info("%s: command processing suspended for %d seconds\n",
149*4882a593Smuzhiyun 			    sym_name(np), sym_driver_setup.settle_delay);
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun /*
153*4882a593Smuzhiyun  *  Choose the more appropriate CAM status if
154*4882a593Smuzhiyun  *  the IO encountered an extended error.
155*4882a593Smuzhiyun  */
sym_xerr_cam_status(int cam_status,int x_status)156*4882a593Smuzhiyun static int sym_xerr_cam_status(int cam_status, int x_status)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	if (x_status) {
159*4882a593Smuzhiyun 		if (x_status & XE_PARITY_ERR)
160*4882a593Smuzhiyun 			cam_status = DID_PARITY;
161*4882a593Smuzhiyun 		else
162*4882a593Smuzhiyun 			cam_status = DID_ERROR;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 	return cam_status;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun /*
168*4882a593Smuzhiyun  *  Build CAM result for a failed or auto-sensed IO.
169*4882a593Smuzhiyun  */
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
	struct scsi_cmnd *cmd = cp->cmd;
	u_int cam_status, scsi_status, drv_status;

	drv_status  = 0;
	cam_status  = DID_OK;
	scsi_status = cp->ssss_status;

	if (cp->host_flags & HF_SENSE) {
		/*
		 *  Auto-sense was performed: report the SCSI status and
		 *  residual saved from the original (failed) command, not
		 *  those of the REQUEST SENSE itself.
		 */
		scsi_status = cp->sv_scsi_status;
		resid = cp->sv_resid;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cmd, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			/* The internal REQUEST SENSE completed cleanly. */
			cam_status = sym_xerr_cam_status(DID_OK,
							 cp->sv_xerr_status);
			drv_status = DRIVER_SENSE;
			/*
			 *  Bounce back the sense data to user.
			 */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			memcpy(cmd->sense_buffer, cp->sns_bbuf,
			       min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
#if 0
			/*
			 *  If the device reports a UNIT ATTENTION condition
			 *  due to a RESET condition, we should consider all
			 *  disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) cmd->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, DID_ABORT,
							cp->target,cp->lun, -1);
			}
#endif
		} else {
			/*
			 * Error return from our internal request sense.  This
			 * is bad: we must clear the contingent allegiance
			 * condition otherwise the device will always return
			 * BUSY.  Use a big stick.
			 */
			sym_reset_scsi_target(np, cmd->device->id);
			cam_status = DID_ERROR;
		}
	} else if (cp->host_status == HS_COMPLETE) 	/* Bad SCSI status */
		cam_status = DID_OK;
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = DID_NO_CONNECT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = DID_ERROR;
	else {						/* Extended error */
		if (sym_verbose) {
			sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		/*
		 *  Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
	}
	/* Pack driver, host and SCSI status bytes into cmd->result. */
	scsi_set_resid(cmd, resid);
	cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
}
240*4882a593Smuzhiyun 
/*
 *  DMA-map the command's scatter/gather list and build the CCB's data
 *  descriptor table.  Returns the number of segments mapped, -1 when the
 *  list exceeds SYM_CONF_MAX_SG, or -2 when scsi_dma_map() fails or
 *  reports no segments.
 */
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	int segment;
	int use_sg;

	cp->data_len = 0;

	use_sg = scsi_dma_map(cmd);
	if (use_sg > 0) {
		struct scatterlist *sg;
		struct sym_tcb *tp = &np->target[cp->target];
		struct sym_tblmove *data;

		if (use_sg > SYM_CONF_MAX_SG) {
			/* Too many segments for our table: undo the mapping. */
			scsi_dma_unmap(cmd);
			return -1;
		}

		/* Pack the entries at the tail of the phys.data[] table. */
		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];

		scsi_for_each_sg(cmd, sg, use_sg, segment) {
			dma_addr_t baddr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			/*
			 *  Round odd segment lengths up when EWS is set in
			 *  the target's wval (presumably wide-transfer mode
			 *  — confirm against sym_hipd), and remember how
			 *  many bytes were added.
			 */
			if ((len & 1) && (tp->head.wval & EWS)) {
				len++;
				cp->odd_byte_adjustment++;
			}

			sym_build_sge(np, &data[segment], baddr, len);
			cp->data_len += len;
		}
	} else {
		/* Mapping failed or produced no segments. */
		segment = -2;
	}

	return segment;
}
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun /*
281*4882a593Smuzhiyun  *  Queue a SCSI command.
282*4882a593Smuzhiyun  */
sym_queue_command(struct sym_hcb * np,struct scsi_cmnd * cmd)283*4882a593Smuzhiyun static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	struct scsi_device *sdev = cmd->device;
286*4882a593Smuzhiyun 	struct sym_tcb *tp;
287*4882a593Smuzhiyun 	struct sym_lcb *lp;
288*4882a593Smuzhiyun 	struct sym_ccb *cp;
289*4882a593Smuzhiyun 	int	order;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	/*
292*4882a593Smuzhiyun 	 *  Retrieve the target descriptor.
293*4882a593Smuzhiyun 	 */
294*4882a593Smuzhiyun 	tp = &np->target[sdev->id];
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	/*
297*4882a593Smuzhiyun 	 *  Select tagged/untagged.
298*4882a593Smuzhiyun 	 */
299*4882a593Smuzhiyun 	lp = sym_lp(tp, sdev->lun);
300*4882a593Smuzhiyun 	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	/*
303*4882a593Smuzhiyun 	 *  Queue the SCSI IO.
304*4882a593Smuzhiyun 	 */
305*4882a593Smuzhiyun 	cp = sym_get_ccb(np, cmd, order);
306*4882a593Smuzhiyun 	if (!cp)
307*4882a593Smuzhiyun 		return 1;	/* Means resource shortage */
308*4882a593Smuzhiyun 	sym_queue_scsiio(np, cmd, cp);
309*4882a593Smuzhiyun 	return 0;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun /*
313*4882a593Smuzhiyun  *  Setup buffers and pointers that address the CDB.
314*4882a593Smuzhiyun  */
sym_setup_cdb(struct sym_hcb * np,struct scsi_cmnd * cmd,struct sym_ccb * cp)315*4882a593Smuzhiyun static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun 	memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
320*4882a593Smuzhiyun 	cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	return 0;
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun /*
326*4882a593Smuzhiyun  *  Setup pointers that address the data and start the I/O.
327*4882a593Smuzhiyun  */
int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	u32 lastp, goalp;
	int dir;

	/*
	 *  Build the CDB.
	 */
	if (sym_setup_cdb(np, cmd, cp))
		goto out_abort;

	/*
	 *  No direction means no data.
	 */
	dir = cmd->sc_data_direction;
	if (dir != DMA_NONE) {
		cp->segments = sym_scatter(np, cp, cmd);
		if (cp->segments < 0) {
			/* Mapping failed (-1/-2 from sym_scatter). */
			sym_set_cam_status(cmd, DID_ERROR);
			goto out_abort;
		}

		/*
		 *  No segments means no data.
		 */
		if (!cp->segments)
			dir = DMA_NONE;
	} else {
		cp->data_len = 0;
		cp->segments = 0;
	}

	/*
	 *  Set the data pointer.  lastp/goalp are SCRIPTS addresses:
	 *  goalp is the instruction past the data-move table, lastp the
	 *  first of the cp->segments MOVE entries (2 longwords each).
	 */
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		/* The SCRIPTS have no bidirectional path: fail the command. */
		scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command");
		sym_set_cam_status(cmd, DID_ERROR);
		goto out_abort;
	case DMA_TO_DEVICE:
		goalp = SCRIPTA_BA(np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_FROM_DEVICE:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA(np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_NONE:
	default:
		lastp = goalp = SCRIPTB_BA(np, no_data);
		break;
	}

	/*
	 *  Set all pointers values needed by SCRIPTS.
	 */
	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
	cp->goalp	    = cpu_to_scr(goalp);

	/*
	 *  When `#ifed 1', the code below makes the driver
	 *  panic on the first attempt to write to a SCSI device.
	 *  It is the first test we want to do after a driver
	 *  change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		break;
	default:
		break;
	}
#endif

	/*
	 *	activate this job.
	 */
	sym_put_start_queue(np, cp);
	return 0;

out_abort:
	/* Release the CCB and complete the command with the error status. */
	sym_free_ccb(np, cp);
	sym_xpt_done(np, cmd);
	return 0;
}
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun /*
421*4882a593Smuzhiyun  *  timer daemon.
422*4882a593Smuzhiyun  *
423*4882a593Smuzhiyun  *  Misused to keep the driver running when
424*4882a593Smuzhiyun  *  interrupts are not configured correctly.
425*4882a593Smuzhiyun  */
static void sym_timer(struct sym_hcb *np)
{
	unsigned long thistime = jiffies;

	/*
	 *  Restart the timer.
	 */
	np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
	add_timer(&np->s.timer);

	/*
	 *  If we are resetting the ncr, wait for settle_time before
	 *  clearing it. Then command processing will be resumed.
	 */
	if (np->s.settle_time_valid) {
		if (time_before_eq(np->s.settle_time, thistime)) {
			if (sym_verbose >= 2 )
				printk("%s: command processing resumed\n",
				       sym_name(np));
			/* queuecommand stops returning HOST_BUSY from here on */
			np->s.settle_time_valid = 0;
		}
		return;
	}

	/*
	 *	Nothing to do for now, but that may come.
	 */
	if (np->s.lasttime + 4*HZ < thistime) {
		np->s.lasttime = thistime;
	}

#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
	/*
	 *  Some way-broken PCI bridges may lead to
	 *  completions being lost when the clearing
	 *  of the INTFLY flag by the CPU occurs
	 *  concurrently with the chip raising this flag.
	 *  If this ever happen, lost completions will
	 * be reaped here.
	 */
	sym_wakeup_done(np);
#endif
}
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun /*
472*4882a593Smuzhiyun  *  PCI BUS error handler.
473*4882a593Smuzhiyun  */
sym_log_bus_error(struct Scsi_Host * shost)474*4882a593Smuzhiyun void sym_log_bus_error(struct Scsi_Host *shost)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun 	struct sym_data *sym_data = shost_priv(shost);
477*4882a593Smuzhiyun 	struct pci_dev *pdev = sym_data->pdev;
478*4882a593Smuzhiyun 	unsigned short pci_sts;
479*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
480*4882a593Smuzhiyun 	if (pci_sts & 0xf900) {
481*4882a593Smuzhiyun 		pci_write_config_word(pdev, PCI_STATUS, pci_sts);
482*4882a593Smuzhiyun 		shost_printk(KERN_WARNING, shost,
483*4882a593Smuzhiyun 			"PCI bus error: status = 0x%04x\n", pci_sts & 0xf900);
484*4882a593Smuzhiyun 	}
485*4882a593Smuzhiyun }
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun /*
488*4882a593Smuzhiyun  * queuecommand method.  Entered with the host adapter lock held and
489*4882a593Smuzhiyun  * interrupts disabled.
490*4882a593Smuzhiyun  */
sym53c8xx_queue_command_lck(struct scsi_cmnd * cmd,void (* done)(struct scsi_cmnd *))491*4882a593Smuzhiyun static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
492*4882a593Smuzhiyun 					void (*done)(struct scsi_cmnd *))
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun 	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
495*4882a593Smuzhiyun 	struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
496*4882a593Smuzhiyun 	int sts = 0;
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun 	cmd->scsi_done = done;
499*4882a593Smuzhiyun 	memset(ucp, 0, sizeof(*ucp));
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	/*
502*4882a593Smuzhiyun 	 *  Shorten our settle_time if needed for
503*4882a593Smuzhiyun 	 *  this command not to time out.
504*4882a593Smuzhiyun 	 */
505*4882a593Smuzhiyun 	if (np->s.settle_time_valid && cmd->request->timeout) {
506*4882a593Smuzhiyun 		unsigned long tlimit = jiffies + cmd->request->timeout;
507*4882a593Smuzhiyun 		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
508*4882a593Smuzhiyun 		if (time_after(np->s.settle_time, tlimit)) {
509*4882a593Smuzhiyun 			np->s.settle_time = tlimit;
510*4882a593Smuzhiyun 		}
511*4882a593Smuzhiyun 	}
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	if (np->s.settle_time_valid)
514*4882a593Smuzhiyun 		return SCSI_MLQUEUE_HOST_BUSY;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	sts = sym_queue_command(np, cmd);
517*4882a593Smuzhiyun 	if (sts)
518*4882a593Smuzhiyun 		return SCSI_MLQUEUE_HOST_BUSY;
519*4882a593Smuzhiyun 	return 0;
520*4882a593Smuzhiyun }
521*4882a593Smuzhiyun 
DEF_SCSI_QCMD(sym53c8xx_queue_command)522*4882a593Smuzhiyun static DEF_SCSI_QCMD(sym53c8xx_queue_command)
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun /*
525*4882a593Smuzhiyun  *  Linux entry point of the interrupt handler.
526*4882a593Smuzhiyun  */
527*4882a593Smuzhiyun static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
528*4882a593Smuzhiyun {
529*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_id;
530*4882a593Smuzhiyun 	struct sym_data *sym_data = shost_priv(shost);
531*4882a593Smuzhiyun 	irqreturn_t result;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	/* Avoid spinloop trying to handle interrupts on frozen device */
534*4882a593Smuzhiyun 	if (pci_channel_offline(sym_data->pdev))
535*4882a593Smuzhiyun 		return IRQ_NONE;
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	spin_lock(shost->host_lock);
540*4882a593Smuzhiyun 	result = sym_interrupt(shost);
541*4882a593Smuzhiyun 	spin_unlock(shost->host_lock);
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	return result;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun /*
549*4882a593Smuzhiyun  *  Linux entry point of the timer handler
550*4882a593Smuzhiyun  */
sym53c8xx_timer(struct timer_list * t)551*4882a593Smuzhiyun static void sym53c8xx_timer(struct timer_list *t)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun 	struct sym_hcb *np = from_timer(np, t, s.timer);
554*4882a593Smuzhiyun 	unsigned long flags;
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	spin_lock_irqsave(np->s.host->host_lock, flags);
557*4882a593Smuzhiyun 	sym_timer(np);
558*4882a593Smuzhiyun 	spin_unlock_irqrestore(np->s.host->host_lock, flags);
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun /*
563*4882a593Smuzhiyun  *  What the eh thread wants us to perform.
564*4882a593Smuzhiyun  */
565*4882a593Smuzhiyun #define SYM_EH_ABORT		0
566*4882a593Smuzhiyun #define SYM_EH_DEVICE_RESET	1
567*4882a593Smuzhiyun #define SYM_EH_BUS_RESET	2
568*4882a593Smuzhiyun #define SYM_EH_HOST_RESET	3
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun /*
571*4882a593Smuzhiyun  *  Generic method for our eh processing.
572*4882a593Smuzhiyun  *  The 'op' argument tells what we have to do.
573*4882a593Smuzhiyun  */
/*
 *  Perform one error-handling operation ('op' is one of the SYM_EH_*
 *  codes) on behalf of the SCSI EH thread.  Returns SCSI_SUCCESS when
 *  the operation completed, SCSI_FAILED otherwise.
 */
static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
{
	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
	struct Scsi_Host *shost = cmd->device->host;
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;
	SYM_QUEHEAD *qp;
	int cmd_queued = 0;
	int sts = -1;
	struct completion eh_done;

	scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);

	/* We may be in an error condition because the PCI bus
	 * went down. In this case, we need to wait until the
	 * PCI bus is reset, the card is reset, and only then
	 * proceed with the scsi error recovery.  There's no
	 * point in hurrying; take a leisurely wait.
	 */
#define WAIT_FOR_PCI_RECOVERY	35
	if (pci_channel_offline(pdev)) {
		int finished_reset = 0;
		init_completion(&eh_done);
		spin_lock_irq(shost->host_lock);
		/* Make sure we didn't race */
		if (pci_channel_offline(pdev)) {
			/* Publish the completion for the PCI recovery path
			 * to signal; only one waiter may be registered. */
			BUG_ON(sym_data->io_reset);
			sym_data->io_reset = &eh_done;
		} else {
			finished_reset = 1;
		}
		spin_unlock_irq(shost->host_lock);
		if (!finished_reset)
			finished_reset = wait_for_completion_timeout
						(sym_data->io_reset,
						WAIT_FOR_PCI_RECOVERY*HZ);
		spin_lock_irq(shost->host_lock);
		sym_data->io_reset = NULL;
		spin_unlock_irq(shost->host_lock);
		if (!finished_reset)
			return SCSI_FAILED;
	}

	spin_lock_irq(shost->host_lock);
	/* This one is queued in some place -> to wait for completion */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->cmd == cmd) {
			cmd_queued = 1;
			break;
		}
	}

	/* Try to proceed the operation we have been asked for */
	sts = -1;
	switch(op) {
	case SYM_EH_ABORT:
		sts = sym_abort_scsiio(np, cmd, 1);
		break;
	case SYM_EH_DEVICE_RESET:
		sts = sym_reset_scsi_target(np, cmd->device->id);
		break;
	case SYM_EH_BUS_RESET:
		sym_reset_scsi_bus(np, 1);
		sts = 0;
		break;
	case SYM_EH_HOST_RESET:
		sym_reset_scsi_bus(np, 0);
		sym_start_up(shost, 1);
		sts = 0;
		break;
	default:
		break;
	}

	/* On error, restore everything and cross fingers :) */
	if (sts)
		cmd_queued = 0;

	if (cmd_queued) {
		/* The command is still in flight: wait (up to 5s) for
		 * sym_xpt_done() to signal its completion. */
		init_completion(&eh_done);
		ucmd->eh_done = &eh_done;
		spin_unlock_irq(shost->host_lock);
		if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
			ucmd->eh_done = NULL;
			sts = -2;	/* timed out */
		}
	} else {
		spin_unlock_irq(shost->host_lock);
	}

	dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
			sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
	return sts ? SCSI_FAILED : SCSI_SUCCESS;
}
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun /*
673*4882a593Smuzhiyun  * Error handlers called from the eh thread (one thread per HBA).
674*4882a593Smuzhiyun  */
sym53c8xx_eh_abort_handler(struct scsi_cmnd * cmd)675*4882a593Smuzhiyun static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
676*4882a593Smuzhiyun {
677*4882a593Smuzhiyun 	return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun 
sym53c8xx_eh_device_reset_handler(struct scsi_cmnd * cmd)680*4882a593Smuzhiyun static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
681*4882a593Smuzhiyun {
682*4882a593Smuzhiyun 	return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
683*4882a593Smuzhiyun }
684*4882a593Smuzhiyun 
sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd * cmd)685*4882a593Smuzhiyun static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
686*4882a593Smuzhiyun {
687*4882a593Smuzhiyun 	return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun 
sym53c8xx_eh_host_reset_handler(struct scsi_cmnd * cmd)690*4882a593Smuzhiyun static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun /*
696*4882a593Smuzhiyun  *  Tune device queuing depth, according to various limits.
697*4882a593Smuzhiyun  */
static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
{
	struct sym_lcb *lp = sym_lp(tp, lun);
	u_short prev_tags;

	/* No LCB was ever allocated for this LUN: nothing to tune. */
	if (lp == NULL)
		return;

	prev_tags = lp->s.reqtags;

	/* Never request more tags than the SCSI layer queue depth allows. */
	if (reqtags > lp->s.scdev_depth)
		reqtags = lp->s.scdev_depth;

	lp->s.reqtags = reqtags;

	/* Report only actual changes; zero tags means untagged operation. */
	if (prev_tags != reqtags) {
		dev_info(&tp->starget->dev,
		         "tagged command queuing %s, command queue depth %d.\n",
		          lp->s.reqtags ? "enabled" : "disabled", reqtags);
	}
}
719*4882a593Smuzhiyun 
/*
 * Linux entry point for SCSI device allocation.
 *
 * Validates the target/LUN range against the driver limits, honours the
 * NVRAM scan-control flags and allocates the per-LUN control block (LCB).
 * Returns 0 on success, -ENXIO when the device must not be scanned, or
 * -ENOMEM when the LCB allocation fails.  All per-target state is
 * manipulated under the host lock.
 */
static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp;
	unsigned long flags;
	int error;

	/* Reject ids/luns outside the compile-time driver limits. */
	if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
		return -ENXIO;

	spin_lock_irqsave(np->s.host->host_lock, flags);

	/*
	 * Fail the device init if the device is flagged NOSCAN at BOOT in
	 * the NVRAM.  This may speed up boot and maintain coherency with
	 * BIOS device numbering.  Clearing the flag allows the user to
	 * rescan skipped devices later.  We also return an error for
	 * devices not flagged for SCAN LUNS in the NVRAM since some single
	 * lun devices behave badly when asked for a non zero LUN.
	 */

	if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
		/* One-shot: the flag is cleared so a later rescan succeeds. */
		tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
		starget_printk(KERN_INFO, sdev->sdev_target,
				"Scan at boot disabled in NVRAM\n");
		error = -ENXIO;
		goto out;
	}

	if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
		/* Only LUN 0 is allowed when multi-LUN scan is disabled. */
		if (sdev->lun != 0) {
			error = -ENXIO;
			goto out;
		}
		starget_printk(KERN_INFO, sdev->sdev_target,
				"Multiple LUNs disabled in NVRAM\n");
	}

	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
	if (!lp) {
		error = -ENOMEM;
		goto out;
	}
	/* First LCB on this target: latch the scsi_target pointer. */
	if (tp->nlcb == 1)
		tp->starget = sdev->sdev_target;

	/* Export the per-target user limits to the SPI transport class. */
	spi_min_period(tp->starget) = tp->usr_period;
	spi_max_width(tp->starget) = tp->usr_width;

	error = 0;
out:
	spin_unlock_irqrestore(np->s.host->host_lock, flags);

	return error;
}
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun /*
778*4882a593Smuzhiyun  * Linux entry point for device queue sizing.
779*4882a593Smuzhiyun  */
sym53c8xx_slave_configure(struct scsi_device * sdev)780*4882a593Smuzhiyun static int sym53c8xx_slave_configure(struct scsi_device *sdev)
781*4882a593Smuzhiyun {
782*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(sdev->host);
783*4882a593Smuzhiyun 	struct sym_tcb *tp = &np->target[sdev->id];
784*4882a593Smuzhiyun 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
785*4882a593Smuzhiyun 	int reqtags, depth_to_use;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	/*
788*4882a593Smuzhiyun 	 *  Get user flags.
789*4882a593Smuzhiyun 	 */
790*4882a593Smuzhiyun 	lp->curr_flags = lp->user_flags;
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun 	/*
793*4882a593Smuzhiyun 	 *  Select queue depth from driver setup.
794*4882a593Smuzhiyun 	 *  Do not use more than configured by user.
795*4882a593Smuzhiyun 	 *  Use at least 1.
796*4882a593Smuzhiyun 	 *  Do not use more than our maximum.
797*4882a593Smuzhiyun 	 */
798*4882a593Smuzhiyun 	reqtags = sym_driver_setup.max_tag;
799*4882a593Smuzhiyun 	if (reqtags > tp->usrtags)
800*4882a593Smuzhiyun 		reqtags = tp->usrtags;
801*4882a593Smuzhiyun 	if (!sdev->tagged_supported)
802*4882a593Smuzhiyun 		reqtags = 0;
803*4882a593Smuzhiyun 	if (reqtags > SYM_CONF_MAX_TAG)
804*4882a593Smuzhiyun 		reqtags = SYM_CONF_MAX_TAG;
805*4882a593Smuzhiyun 	depth_to_use = reqtags ? reqtags : 1;
806*4882a593Smuzhiyun 	scsi_change_queue_depth(sdev, depth_to_use);
807*4882a593Smuzhiyun 	lp->s.scdev_depth = depth_to_use;
808*4882a593Smuzhiyun 	sym_tune_dev_queuing(tp, sdev->lun, reqtags);
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	if (!spi_initial_dv(sdev->sdev_target))
811*4882a593Smuzhiyun 		spi_dv_device(sdev);
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 	return 0;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun 
/*
 * Linux entry point for SCSI device teardown.
 *
 * Frees the per-LUN control block allocated by slave_alloc and, when the
 * last LCB of a target goes away, resets the cached per-target negotiation
 * state so a future rescan renegotiates from scratch.
 */
static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
	unsigned long flags;

	/* if slave_alloc returned before allocating a sym_lcb, return */
	if (!lp)
		return;

	spin_lock_irqsave(np->s.host->host_lock, flags);

	if (lp->busy_itlq || lp->busy_itl) {
		/*
		 * This really shouldn't happen, but we can't return an error
		 * so let's try to stop all on-going I/O.
		 */
		starget_printk(KERN_WARNING, tp->starget,
			       "Removing busy LCB (%d)\n", (u8)sdev->lun);
		sym_reset_scsi_bus(np, 1);
	}

	/* sym_free_lcb() result 0: no LCBs remain on this target. */
	if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
		/*
		 * It was the last unit for this target.
		 * Reset the cached sync/wide/tag register values and force
		 * a fresh negotiation on the next use of this target id.
		 */
		tp->head.sval        = 0;
		tp->head.wval        = np->rv_scntl3;
		tp->head.uval        = 0;
		tp->tgoal.check_nego = 1;
		tp->starget	     = NULL;
	}

	spin_unlock_irqrestore(np->s.host->host_lock, flags);
}
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun /*
854*4882a593Smuzhiyun  *  Linux entry point for info() function
855*4882a593Smuzhiyun  */
sym53c8xx_info(struct Scsi_Host * host)856*4882a593Smuzhiyun static const char *sym53c8xx_info (struct Scsi_Host *host)
857*4882a593Smuzhiyun {
858*4882a593Smuzhiyun 	return SYM_DRIVER_NAME;
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun #ifdef SYM_LINUX_PROC_INFO_SUPPORT
863*4882a593Smuzhiyun /*
864*4882a593Smuzhiyun  *  Proc file system stuff
865*4882a593Smuzhiyun  *
866*4882a593Smuzhiyun  *  A read operation returns adapter information.
867*4882a593Smuzhiyun  *  A write operation is a control command.
868*4882a593Smuzhiyun  *  The string is parsed in the driver code and the command is passed
869*4882a593Smuzhiyun  *  to the sym_usercmd() function.
870*4882a593Smuzhiyun  */
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun #ifdef SYM_LINUX_USER_COMMAND_SUPPORT
873*4882a593Smuzhiyun 
/* Decoded user control command, filled in by sym_user_command(). */
struct	sym_usrcmd {
	u_long	target;		/* bitmask of target ids the command applies to */
	u_long	lun;		/* lun number (not set by the current parser) */
	u_long	data;		/* numeric argument or flag bits for the command */
	u_long	cmd;		/* one of the UC_* opcodes below */
};

/* UC_* opcodes; the gaps (13, 16) carry no definition in this file. */
#define UC_SETSYNC      10	/* set sync. period factor (or async) */
#define UC_SETTAGS	11	/* set tagged queue depth per LUN */
#define UC_SETDEBUG	12	/* set global debug flags (if compiled in) */
#define UC_SETWIDE	14	/* enable/disable wide transfers */
#define UC_SETFLAG	15	/* set per-target user flags */
#define UC_SETVERBOSE	17	/* set adapter verbosity level */
#define UC_RESETDEV	18	/* request a target reset */
#define UC_CLEARDEV	19	/* request clearing of LUN tasks */
889*4882a593Smuzhiyun 
/*
 * Apply a decoded user control command to the adapter and/or to each
 * target whose bit is set in uc->target.  Called under the host lock
 * (taken by sym_user_command()).
 */
static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
{
	struct sym_tcb *tp;
	int t, l;

	switch (uc->cmd) {
	case 0: return;		/* no command decoded: nothing to do */

#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		/* Replace the global debug flag word wholesale. */
		sym_debug_flags = uc->data;
		break;
#endif
	case UC_SETVERBOSE:
		np->verbose = uc->data;
		break;
	default:
		/*
		 * We assume that other commands apply to targets.
		 * This should always be the case and avoid the below
		 * 4 lines to be repeated 6 times.
		 */
		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
			if (!((uc->target >> t) & 1))
				continue;
			tp = &np->target[t];
			/* Skip targets with no LUN control block allocated. */
			if (!tp->nlcb)
				continue;

			switch (uc->cmd) {

			case UC_SETSYNC:
				/* data == 0 or >= 255: fall back to async. */
				if (!uc->data || uc->data >= 255) {
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.offset = 0;
				} else if (uc->data <= 9 && np->minsync_dt) {
					/* Factor <= 9 with DT support: ask
					 * for DT/IU/QAS, wide, max offset. */
					if (uc->data < np->minsync_dt)
						uc->data = np->minsync_dt;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 1;
					tp->tgoal.width = 1;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs_dt;
				} else {
					/* Plain ST sync negotiation. */
					if (uc->data < np->minsync)
						uc->data = np->minsync;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs;
				}
				/* Renegotiate on the next command. */
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETWIDE:
				tp->tgoal.width = uc->data ? 1 : 0;
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETTAGS:
				/* Apply the new tag count to every LUN. */
				for (l = 0; l < SYM_CONF_MAX_LUN; l++)
					sym_tune_dev_queuing(tp, l, uc->data);
				break;
			case UC_RESETDEV:
				/* Flag the target for reset, then raise
				 * SIGP|SEM in ISTAT so the chip/ISR path
				 * picks up the request (istat_sem mirrors
				 * the SEM bit for the interrupt handler). */
				tp->to_reset = 1;
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_CLEARDEV:
				/* Flag every allocated LUN for task clearing,
				 * then signal the chip as for UC_RESETDEV. */
				for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
					struct sym_lcb *lp = sym_lp(tp, l);
					if (lp) lp->to_clear = 1;
				}
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_SETFLAG:
				tp->usrflags = uc->data;
				break;
			}
		}
		break;
	}
}
973*4882a593Smuzhiyun 
/*
 * Count leading whitespace in a buffer, stopping early at a NUL byte
 * or after at most 'len' characters.  Returns the number of whitespace
 * characters skipped.
 */
static int sym_skip_spaces(char *ptr, int len)
{
	int remaining = len;

	while (remaining > 0) {
		int ch = *ptr++;

		if (!ch || !isspace(ch))
			break;
		remaining--;
	}

	return len - remaining;
}
982*4882a593Smuzhiyun 
/*
 * Parse a decimal integer at *ptr into *pv and return the number of
 * characters consumed (0 when no digits were found).
 */
static int get_int_arg(char *ptr, int len, u_long *pv)
{
	char *stop;

	/* NOTE(review): 'len' is accepted but not enforced here;
	 * simple_strtoul relies on the caller's buffer being
	 * NUL-terminated (proc input is) — confirm before reuse. */
	*pv = simple_strtoul(ptr, &stop, 10);

	return stop - ptr;
}
990*4882a593Smuzhiyun 
/*
 * Test whether the buffer starts with the given keyword.  Returns the
 * keyword length on a match, 0 otherwise.
 */
static int is_keyword(char *ptr, int len, char *verb)
{
	int n = strlen(verb);

	return (len >= n && memcmp(verb, ptr, n) == 0) ? n : 0;
}
1000*4882a593Smuzhiyun 
/*
 * Parser helper macros.  Both consume input by advancing 'ptr' and
 * shrinking 'len', and both perform a hidden 'return -EINVAL' from the
 * ENCLOSING function on failure.  They also require a local variable
 * named 'arg_len' to be in scope at the expansion site.
 */
#define SKIP_SPACES(ptr, len)						\
	if ((arg_len = sym_skip_spaces(ptr, len)) < 1)			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;

#define GET_INT_ARG(ptr, len, v)					\
	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun /*
1013*4882a593Smuzhiyun  * Parse a control command
1014*4882a593Smuzhiyun  */
1015*4882a593Smuzhiyun 
/*
 * Parse a control command written to the proc file and execute it.
 *
 * Grammar: <verb> [target|"all"] [argument...].  Returns the original
 * 'length' on success so the proc write is seen as fully consumed, or
 * -EINVAL on any parse error (note that SKIP_SPACES/GET_INT_ARG return
 * -EINVAL from this function directly).  The decoded command is run by
 * sym_exec_user_command() under the host lock.
 */
static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	char *ptr	= buffer;
	int len		= length;
	struct sym_usrcmd cmd, *uc = &cmd;
	int		arg_len;
	u_long 		target;

	memset(uc, 0, sizeof(*uc));

	/* Drop a single trailing newline, if present. */
	if (len > 0 && ptr[len-1] == '\n')
		--len;

	/* Stage 1: recognize the command verb. */
	if	((arg_len = is_keyword(ptr, len, "setsync")) != 0)
		uc->cmd = UC_SETSYNC;
	else if	((arg_len = is_keyword(ptr, len, "settags")) != 0)
		uc->cmd = UC_SETTAGS;
	else if	((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
		uc->cmd = UC_SETVERBOSE;
	else if	((arg_len = is_keyword(ptr, len, "setwide")) != 0)
		uc->cmd = UC_SETWIDE;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	else if	((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
		uc->cmd = UC_SETDEBUG;
#endif
	else if	((arg_len = is_keyword(ptr, len, "setflag")) != 0)
		uc->cmd = UC_SETFLAG;
	else if	((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
		uc->cmd = UC_RESETDEV;
	else if	((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
		uc->cmd = UC_CLEARDEV;
	else
		arg_len = 0;

#ifdef DEBUG_PROC_INFO
printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
#endif

	if (!arg_len)
		return -EINVAL;
	ptr += arg_len; len -= arg_len;

	/* Stage 2: target selector, for commands that take one. */
	switch(uc->cmd) {
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
	case UC_SETFLAG:
	case UC_RESETDEV:
	case UC_CLEARDEV:
		SKIP_SPACES(ptr, len);
		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
			ptr += arg_len; len -= arg_len;
			uc->target = ~0;	/* every target id */
		} else {
			GET_INT_ARG(ptr, len, target);
			uc->target = (1<<target);	/* single-bit mask */
#ifdef DEBUG_PROC_INFO
printk("sym_user_command: target=%ld\n", target);
#endif
		}
		break;
	}

	/* Stage 3: command argument(s). */
	switch(uc->cmd) {
	case UC_SETVERBOSE:
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
		/* A single numeric argument. */
		SKIP_SPACES(ptr, len);
		GET_INT_ARG(ptr, len, uc->data);
#ifdef DEBUG_PROC_INFO
printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		/* A whitespace-separated list of flag names, OR-ed together. */
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if	((arg_len = is_keyword(ptr, len, "alloc")))
				uc->data |= DEBUG_ALLOC;
			else if	((arg_len = is_keyword(ptr, len, "phase")))
				uc->data |= DEBUG_PHASE;
			else if	((arg_len = is_keyword(ptr, len, "queue")))
				uc->data |= DEBUG_QUEUE;
			else if	((arg_len = is_keyword(ptr, len, "result")))
				uc->data |= DEBUG_RESULT;
			else if	((arg_len = is_keyword(ptr, len, "scatter")))
				uc->data |= DEBUG_SCATTER;
			else if	((arg_len = is_keyword(ptr, len, "script")))
				uc->data |= DEBUG_SCRIPT;
			else if	((arg_len = is_keyword(ptr, len, "tiny")))
				uc->data |= DEBUG_TINY;
			else if	((arg_len = is_keyword(ptr, len, "timing")))
				uc->data |= DEBUG_TIMING;
			else if	((arg_len = is_keyword(ptr, len, "nego")))
				uc->data |= DEBUG_NEGO;
			else if	((arg_len = is_keyword(ptr, len, "tags")))
				uc->data |= DEBUG_TAGS;
			else if	((arg_len = is_keyword(ptr, len, "pointer")))
				uc->data |= DEBUG_POINTER;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
#ifdef DEBUG_PROC_INFO
printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
	case UC_SETFLAG:
		/* Only "no_disc" is recognized; it CLEARS the disconnect bit. */
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if	((arg_len = is_keyword(ptr, len, "no_disc")))
				uc->data &= ~SYM_DISC_ENABLED;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
		break;
	default:
		break;
	}

	/* Trailing garbage is an error; otherwise run the command locked. */
	if (len)
		return -EINVAL;
	else {
		unsigned long flags;

		spin_lock_irqsave(shost->host_lock, flags);
		sym_exec_user_command(np, uc);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	return length;
}
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun #endif	/* SYM_LINUX_USER_COMMAND_SUPPORT */
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun /*
1156*4882a593Smuzhiyun  *  Copy formatted information into the input buffer.
1157*4882a593Smuzhiyun  */
/*
 * Proc/seq_file read handler: dump adapter identification and limits.
 * Returns 0 on success, or -EINVAL when user-info support is compiled out.
 */
static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
#ifdef SYM_LINUX_USER_INFO_SUPPORT
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;

	seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, "
		 "revision id 0x%x\n", np->s.chip_name,
		 pdev->device, pdev->revision);
	seq_printf(m, "At PCI address %s, IRQ %u\n",
			 pci_name(pdev), pdev->irq);
	/* Prefer the DT minimum period when the chip supports DT. */
	seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n",
		 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
		 np->maxwide ? "Wide" : "Narrow",
		 np->minsync_dt ? ", DT capable" : "");

	/* Compile-time limits, shared by all adapters. */
	seq_printf(m, "Max. started commands %d, "
		 "max. commands per LUN %d\n",
		 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);

	return 0;
#else
	return -EINVAL;
#endif /* SYM_LINUX_USER_INFO_SUPPORT */
}
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun #endif /* SYM_LINUX_PROC_INFO_SUPPORT */
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun /*
1188*4882a593Smuzhiyun  * Free resources claimed by sym_iomap_device().  Note that
1189*4882a593Smuzhiyun  * sym_free_resources() should be used instead of this function after calling
1190*4882a593Smuzhiyun  * sym_attach().
1191*4882a593Smuzhiyun  */
sym_iounmap_device(struct sym_device * device)1192*4882a593Smuzhiyun static void sym_iounmap_device(struct sym_device *device)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	if (device->s.ioaddr)
1195*4882a593Smuzhiyun 		pci_iounmap(device->pdev, device->s.ioaddr);
1196*4882a593Smuzhiyun 	if (device->s.ramaddr)
1197*4882a593Smuzhiyun 		pci_iounmap(device->pdev, device->s.ramaddr);
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun /*
1201*4882a593Smuzhiyun  *	Free controller resources.
1202*4882a593Smuzhiyun  */
sym_free_resources(struct sym_hcb * np,struct pci_dev * pdev,int do_free_irq)1203*4882a593Smuzhiyun static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
1204*4882a593Smuzhiyun 		int do_free_irq)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	/*
1207*4882a593Smuzhiyun 	 *  Free O/S specific resources.
1208*4882a593Smuzhiyun 	 */
1209*4882a593Smuzhiyun 	if (do_free_irq)
1210*4882a593Smuzhiyun 		free_irq(pdev->irq, np->s.host);
1211*4882a593Smuzhiyun 	if (np->s.ioaddr)
1212*4882a593Smuzhiyun 		pci_iounmap(pdev, np->s.ioaddr);
1213*4882a593Smuzhiyun 	if (np->s.ramaddr)
1214*4882a593Smuzhiyun 		pci_iounmap(pdev, np->s.ramaddr);
1215*4882a593Smuzhiyun 	/*
1216*4882a593Smuzhiyun 	 *  Free O/S independent resources.
1217*4882a593Smuzhiyun 	 */
1218*4882a593Smuzhiyun 	sym_hcb_free(np);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	sym_mfree_dma(np, sizeof(*np), "HCB");
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun /*
1224*4882a593Smuzhiyun  *  Host attach and initialisations.
1225*4882a593Smuzhiyun  *
1226*4882a593Smuzhiyun  *  Allocate host data and ncb structure.
1227*4882a593Smuzhiyun  *  Remap MMIO region.
1228*4882a593Smuzhiyun  *  Do chip initialization.
1229*4882a593Smuzhiyun  *  If all is OK, install interrupt handling and
1230*4882a593Smuzhiyun  *  start the timer daemon.
1231*4882a593Smuzhiyun  */
sym_attach(struct scsi_host_template * tpnt,int unit,struct sym_device * dev)1232*4882a593Smuzhiyun static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
1233*4882a593Smuzhiyun 				    struct sym_device *dev)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct sym_data *sym_data;
1236*4882a593Smuzhiyun 	struct sym_hcb *np = NULL;
1237*4882a593Smuzhiyun 	struct Scsi_Host *shost = NULL;
1238*4882a593Smuzhiyun 	struct pci_dev *pdev = dev->pdev;
1239*4882a593Smuzhiyun 	unsigned long flags;
1240*4882a593Smuzhiyun 	struct sym_fw *fw;
1241*4882a593Smuzhiyun 	int do_free_irq = 0;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
1244*4882a593Smuzhiyun 		unit, dev->chip.name, pdev->revision, pci_name(pdev),
1245*4882a593Smuzhiyun 		pdev->irq);
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	/*
1248*4882a593Smuzhiyun 	 *  Get the firmware for this chip.
1249*4882a593Smuzhiyun 	 */
1250*4882a593Smuzhiyun 	fw = sym_find_firmware(&dev->chip);
1251*4882a593Smuzhiyun 	if (!fw)
1252*4882a593Smuzhiyun 		goto attach_failed;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
1255*4882a593Smuzhiyun 	if (!shost)
1256*4882a593Smuzhiyun 		goto attach_failed;
1257*4882a593Smuzhiyun 	sym_data = shost_priv(shost);
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	/*
1260*4882a593Smuzhiyun 	 *  Allocate immediately the host control block,
1261*4882a593Smuzhiyun 	 *  since we are only expecting to succeed. :)
1262*4882a593Smuzhiyun 	 *  We keep track in the HCB of all the resources that
1263*4882a593Smuzhiyun 	 *  are to be released on error.
1264*4882a593Smuzhiyun 	 */
1265*4882a593Smuzhiyun 	np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
1266*4882a593Smuzhiyun 	if (!np)
1267*4882a593Smuzhiyun 		goto attach_failed;
1268*4882a593Smuzhiyun 	np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
1269*4882a593Smuzhiyun 	sym_data->ncb = np;
1270*4882a593Smuzhiyun 	sym_data->pdev = pdev;
1271*4882a593Smuzhiyun 	np->s.host = shost;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	pci_set_drvdata(pdev, shost);
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	/*
1276*4882a593Smuzhiyun 	 *  Copy some useful infos to the HCB.
1277*4882a593Smuzhiyun 	 */
1278*4882a593Smuzhiyun 	np->hcb_ba	= vtobus(np);
1279*4882a593Smuzhiyun 	np->verbose	= sym_driver_setup.verbose;
1280*4882a593Smuzhiyun 	np->s.unit	= unit;
1281*4882a593Smuzhiyun 	np->features	= dev->chip.features;
1282*4882a593Smuzhiyun 	np->clock_divn	= dev->chip.nr_divisor;
1283*4882a593Smuzhiyun 	np->maxoffs	= dev->chip.offset_max;
1284*4882a593Smuzhiyun 	np->maxburst	= dev->chip.burst_max;
1285*4882a593Smuzhiyun 	np->myaddr	= dev->host_id;
1286*4882a593Smuzhiyun 	np->mmio_ba	= (u32)dev->mmio_base;
1287*4882a593Smuzhiyun 	np->ram_ba	= (u32)dev->ram_base;
1288*4882a593Smuzhiyun 	np->s.ioaddr	= dev->s.ioaddr;
1289*4882a593Smuzhiyun 	np->s.ramaddr	= dev->s.ramaddr;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	/*
1292*4882a593Smuzhiyun 	 *  Edit its name.
1293*4882a593Smuzhiyun 	 */
1294*4882a593Smuzhiyun 	strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
1295*4882a593Smuzhiyun 	sprintf(np->s.inst_name, "sym%d", np->s.unit);
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
1298*4882a593Smuzhiyun 			!dma_set_mask(&pdev->dev, DMA_DAC_MASK)) {
1299*4882a593Smuzhiyun 		set_dac(np);
1300*4882a593Smuzhiyun 	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
1301*4882a593Smuzhiyun 		printf_warning("%s: No suitable DMA available\n", sym_name(np));
1302*4882a593Smuzhiyun 		goto attach_failed;
1303*4882a593Smuzhiyun 	}
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	if (sym_hcb_attach(shost, fw, dev->nvram))
1306*4882a593Smuzhiyun 		goto attach_failed;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/*
1309*4882a593Smuzhiyun 	 *  Install the interrupt handler.
1310*4882a593Smuzhiyun 	 *  If we synchonize the C code with SCRIPTS on interrupt,
1311*4882a593Smuzhiyun 	 *  we do not want to share the INTR line at all.
1312*4882a593Smuzhiyun 	 */
1313*4882a593Smuzhiyun 	if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX,
1314*4882a593Smuzhiyun 			shost)) {
1315*4882a593Smuzhiyun 		printf_err("%s: request irq %u failure\n",
1316*4882a593Smuzhiyun 			sym_name(np), pdev->irq);
1317*4882a593Smuzhiyun 		goto attach_failed;
1318*4882a593Smuzhiyun 	}
1319*4882a593Smuzhiyun 	do_free_irq = 1;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	/*
1322*4882a593Smuzhiyun 	 *  After SCSI devices have been opened, we cannot
1323*4882a593Smuzhiyun 	 *  reset the bus safely, so we do it here.
1324*4882a593Smuzhiyun 	 */
1325*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
1326*4882a593Smuzhiyun 	if (sym_reset_scsi_bus(np, 0))
1327*4882a593Smuzhiyun 		goto reset_failed;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	/*
1330*4882a593Smuzhiyun 	 *  Start the SCRIPTS.
1331*4882a593Smuzhiyun 	 */
1332*4882a593Smuzhiyun 	sym_start_up(shost, 1);
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	/*
1335*4882a593Smuzhiyun 	 *  Start the timer daemon
1336*4882a593Smuzhiyun 	 */
1337*4882a593Smuzhiyun 	timer_setup(&np->s.timer, sym53c8xx_timer, 0);
1338*4882a593Smuzhiyun 	np->s.lasttime=0;
1339*4882a593Smuzhiyun 	sym_timer (np);
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	/*
1342*4882a593Smuzhiyun 	 *  Fill Linux host instance structure
1343*4882a593Smuzhiyun 	 *  and return success.
1344*4882a593Smuzhiyun 	 */
1345*4882a593Smuzhiyun 	shost->max_channel	= 0;
1346*4882a593Smuzhiyun 	shost->this_id		= np->myaddr;
1347*4882a593Smuzhiyun 	shost->max_id		= np->maxwide ? 16 : 8;
1348*4882a593Smuzhiyun 	shost->max_lun		= SYM_CONF_MAX_LUN;
1349*4882a593Smuzhiyun 	shost->unique_id	= pci_resource_start(pdev, 0);
1350*4882a593Smuzhiyun 	shost->cmd_per_lun	= SYM_CONF_MAX_TAG;
1351*4882a593Smuzhiyun 	shost->can_queue	= (SYM_CONF_MAX_START-2);
1352*4882a593Smuzhiyun 	shost->sg_tablesize	= SYM_CONF_MAX_SG;
1353*4882a593Smuzhiyun 	shost->max_cmd_len	= 16;
1354*4882a593Smuzhiyun 	BUG_ON(sym2_transport_template == NULL);
1355*4882a593Smuzhiyun 	shost->transportt	= sym2_transport_template;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	/* 53c896 rev 1 errata: DMA may not cross 16MB boundary */
1358*4882a593Smuzhiyun 	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2)
1359*4882a593Smuzhiyun 		shost->dma_boundary = 0xFFFFFF;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	return shost;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun  reset_failed:
1366*4882a593Smuzhiyun 	printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
1367*4882a593Smuzhiyun 		   "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
1368*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
1369*4882a593Smuzhiyun  attach_failed:
1370*4882a593Smuzhiyun 	printf_info("sym%d: giving up ...\n", unit);
1371*4882a593Smuzhiyun 	if (np)
1372*4882a593Smuzhiyun 		sym_free_resources(np, pdev, do_free_irq);
1373*4882a593Smuzhiyun 	else
1374*4882a593Smuzhiyun 		sym_iounmap_device(dev);
1375*4882a593Smuzhiyun 	if (shost)
1376*4882a593Smuzhiyun 		scsi_host_put(shost);
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	return NULL;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun /*
1383*4882a593Smuzhiyun  *    Detect and try to read SYMBIOS and TEKRAM NVRAM.
1384*4882a593Smuzhiyun  */
#if SYM_CONF_NVRAM_SUPPORT
/*
 * Attach the NVRAM descriptor to the device and try to read the
 * SYMBIOS/TEKRAM NVRAM contents into it.  nvp->type is cleared first
 * so a failed read leaves the descriptor marked "no NVRAM present".
 */
static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
	devp->nvram = nvp;
	nvp->type = 0;

	sym_read_nvram(devp, nvp);
}
#else
/* NVRAM support compiled out: probing is a no-op. */
static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
}
#endif	/* SYM_CONF_NVRAM_SUPPORT */
1398*4882a593Smuzhiyun 
sym_check_supported(struct sym_device * device)1399*4882a593Smuzhiyun static int sym_check_supported(struct sym_device *device)
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun 	struct sym_chip *chip;
1402*4882a593Smuzhiyun 	struct pci_dev *pdev = device->pdev;
1403*4882a593Smuzhiyun 	unsigned long io_port = pci_resource_start(pdev, 0);
1404*4882a593Smuzhiyun 	int i;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	/*
1407*4882a593Smuzhiyun 	 *  If user excluded this chip, do not initialize it.
1408*4882a593Smuzhiyun 	 *  I hate this code so much.  Must kill it.
1409*4882a593Smuzhiyun 	 */
1410*4882a593Smuzhiyun 	if (io_port) {
1411*4882a593Smuzhiyun 		for (i = 0 ; i < 8 ; i++) {
1412*4882a593Smuzhiyun 			if (sym_driver_setup.excludes[i] == io_port)
1413*4882a593Smuzhiyun 				return -ENODEV;
1414*4882a593Smuzhiyun 		}
1415*4882a593Smuzhiyun 	}
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	/*
1418*4882a593Smuzhiyun 	 * Check if the chip is supported.  Then copy the chip description
1419*4882a593Smuzhiyun 	 * to our device structure so we can make it match the actual device
1420*4882a593Smuzhiyun 	 * and options.
1421*4882a593Smuzhiyun 	 */
1422*4882a593Smuzhiyun 	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
1423*4882a593Smuzhiyun 	if (!chip) {
1424*4882a593Smuzhiyun 		dev_info(&pdev->dev, "device not supported\n");
1425*4882a593Smuzhiyun 		return -ENODEV;
1426*4882a593Smuzhiyun 	}
1427*4882a593Smuzhiyun 	memcpy(&device->chip, chip, sizeof(device->chip));
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	return 0;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun /*
1433*4882a593Smuzhiyun  * Ignore Symbios chips controlled by various RAID controllers.
1434*4882a593Smuzhiyun  * These controllers set value 0x52414944 at RAM end - 16.
1435*4882a593Smuzhiyun  */
sym_check_raid(struct sym_device * device)1436*4882a593Smuzhiyun static int sym_check_raid(struct sym_device *device)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun 	unsigned int ram_size, ram_val;
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	if (!device->s.ramaddr)
1441*4882a593Smuzhiyun 		return 0;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	if (device->chip.features & FE_RAM8K)
1444*4882a593Smuzhiyun 		ram_size = 8192;
1445*4882a593Smuzhiyun 	else
1446*4882a593Smuzhiyun 		ram_size = 4096;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	ram_val = readl(device->s.ramaddr + ram_size - 16);
1449*4882a593Smuzhiyun 	if (ram_val != 0x52414944)
1450*4882a593Smuzhiyun 		return 0;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	dev_info(&device->pdev->dev,
1453*4882a593Smuzhiyun 			"not initializing, driven by RAID controller.\n");
1454*4882a593Smuzhiyun 	return -ENODEV;
1455*4882a593Smuzhiyun }
1456*4882a593Smuzhiyun 
sym_set_workarounds(struct sym_device * device)1457*4882a593Smuzhiyun static int sym_set_workarounds(struct sym_device *device)
1458*4882a593Smuzhiyun {
1459*4882a593Smuzhiyun 	struct sym_chip *chip = &device->chip;
1460*4882a593Smuzhiyun 	struct pci_dev *pdev = device->pdev;
1461*4882a593Smuzhiyun 	u_short status_reg;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	/*
1464*4882a593Smuzhiyun 	 *  (ITEM 12 of a DEL about the 896 I haven't yet).
1465*4882a593Smuzhiyun 	 *  We must ensure the chip will use WRITE AND INVALIDATE.
1466*4882a593Smuzhiyun 	 *  The revision number limit is for now arbitrary.
1467*4882a593Smuzhiyun 	 */
1468*4882a593Smuzhiyun 	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) {
1469*4882a593Smuzhiyun 		chip->features	|= (FE_WRIE | FE_CLSE);
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	/* If the chip can do Memory Write Invalidate, enable it */
1473*4882a593Smuzhiyun 	if (chip->features & FE_WRIE) {
1474*4882a593Smuzhiyun 		if (pci_set_mwi(pdev))
1475*4882a593Smuzhiyun 			return -ENODEV;
1476*4882a593Smuzhiyun 	}
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	/*
1479*4882a593Smuzhiyun 	 *  Work around for errant bit in 895A. The 66Mhz
1480*4882a593Smuzhiyun 	 *  capable bit is set erroneously. Clear this bit.
1481*4882a593Smuzhiyun 	 *  (Item 1 DEL 533)
1482*4882a593Smuzhiyun 	 *
1483*4882a593Smuzhiyun 	 *  Make sure Config space and Features agree.
1484*4882a593Smuzhiyun 	 *
1485*4882a593Smuzhiyun 	 *  Recall: writes are not normal to status register -
1486*4882a593Smuzhiyun 	 *  write a 1 to clear and a 0 to leave unchanged.
1487*4882a593Smuzhiyun 	 *  Can only reset bits.
1488*4882a593Smuzhiyun 	 */
1489*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1490*4882a593Smuzhiyun 	if (chip->features & FE_66MHZ) {
1491*4882a593Smuzhiyun 		if (!(status_reg & PCI_STATUS_66MHZ))
1492*4882a593Smuzhiyun 			chip->features &= ~FE_66MHZ;
1493*4882a593Smuzhiyun 	} else {
1494*4882a593Smuzhiyun 		if (status_reg & PCI_STATUS_66MHZ) {
1495*4882a593Smuzhiyun 			status_reg = PCI_STATUS_66MHZ;
1496*4882a593Smuzhiyun 			pci_write_config_word(pdev, PCI_STATUS, status_reg);
1497*4882a593Smuzhiyun 			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1498*4882a593Smuzhiyun 		}
1499*4882a593Smuzhiyun 	}
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	return 0;
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun /*
1505*4882a593Smuzhiyun  * Map HBA registers and on-chip SRAM (if present).
1506*4882a593Smuzhiyun  */
static int sym_iomap_device(struct sym_device *device)
{
	struct pci_dev *pdev = device->pdev;
	struct pci_bus_region bus_addr;
	int i = 2;	/* BAR index of the on-chip SRAM (may shift to 3 below) */

	/* Bus-side address of the MMIO register window (BAR 1). */
	pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);
	device->mmio_base = bus_addr.start;

	if (device->chip.features & FE_RAM) {
		/*
		 * If the BAR is 64-bit, resource 2 will be occupied by the
		 * upper 32 bits
		 */
		if (!pdev->resource[i].flags)
			i++;
		pcibios_resource_to_bus(pdev->bus, &bus_addr,
					&pdev->resource[i]);
		device->ram_base = bus_addr.start;
	}

#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
	/* Prefer memory-mapped register access when configured in. */
	if (device->mmio_base)
		device->s.ioaddr = pci_iomap(pdev, 1,
						pci_resource_len(pdev, 1));
#endif
	/* Fall back to the I/O port window (BAR 0). */
	if (!device->s.ioaddr)
		device->s.ioaddr = pci_iomap(pdev, 0,
						pci_resource_len(pdev, 0));
	if (!device->s.ioaddr) {
		dev_err(&pdev->dev, "could not map registers; giving up.\n");
		return -EIO;
	}
	if (device->ram_base) {
		/* SRAM mapping is optional: warn and carry on without it. */
		device->s.ramaddr = pci_iomap(pdev, i,
						pci_resource_len(pdev, i));
		if (!device->s.ramaddr) {
			dev_warn(&pdev->dev,
				"could not map SRAM; continuing anyway.\n");
			device->ram_base = 0;
		}
	}

	return 0;
}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun /*
1554*4882a593Smuzhiyun  * The NCR PQS and PDS cards are constructed as a DEC bridge
1555*4882a593Smuzhiyun  * behind which sits a proprietary NCR memory controller and
1556*4882a593Smuzhiyun  * either four or two 53c875s as separate devices.  We can tell
1557*4882a593Smuzhiyun  * if an 875 is part of a PQS/PDS or not since if it is, it will
1558*4882a593Smuzhiyun  * be on the same bus as the memory controller.  In its usual
1559*4882a593Smuzhiyun  * mode of operation, the 875s are slaved to the memory
1560*4882a593Smuzhiyun  * controller for all transfers.  To operate with the Linux
1561*4882a593Smuzhiyun  * driver, the memory controller is disabled and the 875s
1562*4882a593Smuzhiyun  * freed to function independently.  The only wrinkle is that
1563*4882a593Smuzhiyun  * the preset SCSI ID (which may be zero) must be read in from
1564*4882a593Smuzhiyun  * a special configuration space register of the 875.
1565*4882a593Smuzhiyun  */
sym_config_pqs(struct pci_dev * pdev,struct sym_device * sym_dev)1566*4882a593Smuzhiyun static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun 	int slot;
1569*4882a593Smuzhiyun 	u8 tmp;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	for (slot = 0; slot < 256; slot++) {
1572*4882a593Smuzhiyun 		struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 		if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
1575*4882a593Smuzhiyun 			pci_dev_put(memc);
1576*4882a593Smuzhiyun 			continue;
1577*4882a593Smuzhiyun 		}
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 		/* bit 1: allow individual 875 configuration */
1580*4882a593Smuzhiyun 		pci_read_config_byte(memc, 0x44, &tmp);
1581*4882a593Smuzhiyun 		if ((tmp & 0x2) == 0) {
1582*4882a593Smuzhiyun 			tmp |= 0x2;
1583*4882a593Smuzhiyun 			pci_write_config_byte(memc, 0x44, tmp);
1584*4882a593Smuzhiyun 		}
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 		/* bit 2: drive individual 875 interrupts to the bus */
1587*4882a593Smuzhiyun 		pci_read_config_byte(memc, 0x45, &tmp);
1588*4882a593Smuzhiyun 		if ((tmp & 0x4) == 0) {
1589*4882a593Smuzhiyun 			tmp |= 0x4;
1590*4882a593Smuzhiyun 			pci_write_config_byte(memc, 0x45, tmp);
1591*4882a593Smuzhiyun 		}
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 		pci_dev_put(memc);
1594*4882a593Smuzhiyun 		break;
1595*4882a593Smuzhiyun 	}
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	pci_read_config_byte(pdev, 0x84, &tmp);
1598*4882a593Smuzhiyun 	sym_dev->host_id = tmp;
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun /*
1602*4882a593Smuzhiyun  *  Called before unloading the module.
1603*4882a593Smuzhiyun  *  Detach the host.
1604*4882a593Smuzhiyun  *  We have to free resources and halt the NCR chip.
1605*4882a593Smuzhiyun  */
static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	printk("%s: detaching ...\n", sym_name(np));

	/* Stop the background timer before tearing anything down. */
	del_timer_sync(&np->s.timer);

	/*
	 * Reset NCR chip.
	 * We should use sym_soft_reset(), but we don't want to do
	 * so, since we may not be safe if interrupts occur.
	 */
	printk("%s: resetting chip\n", sym_name(np));
	OUTB(np, nc_istat, SRST);	/* assert software reset */
	INB(np, nc_mbox1);		/* read back before the delay */
	udelay(10);
	OUTB(np, nc_istat, 0);		/* release the chip from reset */

	/* Free IRQ, mappings and DMA resources, then drop the host ref. */
	sym_free_resources(np, pdev, 1);
	scsi_host_put(shost);

	return 1;
}
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun /*
1631*4882a593Smuzhiyun  * Driver host template.
1632*4882a593Smuzhiyun  */
/* Mid-layer host template: entry points this driver exposes to SCSI core. */
static struct scsi_host_template sym2_template = {
	.module			= THIS_MODULE,
	.name			= "sym53c8xx",
	.info			= sym53c8xx_info,
	.queuecommand		= sym53c8xx_queue_command,
	.slave_alloc		= sym53c8xx_slave_alloc,
	.slave_configure	= sym53c8xx_slave_configure,
	.slave_destroy		= sym53c8xx_slave_destroy,
	.eh_abort_handler	= sym53c8xx_eh_abort_handler,
	.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
	.this_id		= 7,		/* default host SCSI ID */
	.max_sectors		= 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
	.show_info		= sym_show_info,
#ifdef	SYM_LINUX_USER_COMMAND_SUPPORT
	.write_info		= sym_user_command,
#endif
	.proc_name		= NAME53C8XX,
#endif
};
1655*4882a593Smuzhiyun 
/* Number of HBA instances successfully attached; also used as unit number. */
static int attach_count;
1657*4882a593Smuzhiyun 
/*
 * PCI probe entry point: enable and map the device, apply chip
 * work-arounds, read NVRAM, attach the HCB and register the host
 * with the SCSI mid-layer.  Unwinds via the goto ladder on failure.
 * Returns 0 on success, -ENODEV on any failure.
 */
static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct sym_device sym_dev;
	struct sym_nvram nvram;
	struct Scsi_Host *shost;
	int do_iounmap = 0;		/* mappings we own must be undone */
	int do_disable_device = 1;	/* cleared when RAID fw owns the chip */

	memset(&sym_dev, 0, sizeof(sym_dev));
	memset(&nvram, 0, sizeof(nvram));
	sym_dev.pdev = pdev;
	sym_dev.host_id = SYM_SETUP_HOST_ID;

	if (pci_enable_device(pdev))
		goto leave;

	pci_set_master(pdev);

	if (pci_request_regions(pdev, NAME53C8XX))
		goto disable;

	if (sym_check_supported(&sym_dev))
		goto free;

	if (sym_iomap_device(&sym_dev))
		goto free;
	do_iounmap = 1;

	if (sym_check_raid(&sym_dev)) {
		do_disable_device = 0;	/* Don't disable the device */
		goto free;
	}

	if (sym_set_workarounds(&sym_dev))
		goto free;

	sym_config_pqs(pdev, &sym_dev);

	sym_get_nvram(&sym_dev, &nvram);

	do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
	shost = sym_attach(&sym2_template, attach_count, &sym_dev);
	if (!shost)
		goto free;

	if (scsi_add_host(shost, &pdev->dev))
		goto detach;
	scsi_scan_host(shost);

	attach_count++;

	return 0;

 detach:
	sym_detach(pci_get_drvdata(pdev), pdev);
 free:
	if (do_iounmap)
		sym_iounmap_device(&sym_dev);
	pci_release_regions(pdev);
 disable:
	if (do_disable_device)
		pci_disable_device(pdev);
 leave:
	return -ENODEV;
}
1723*4882a593Smuzhiyun 
sym2_remove(struct pci_dev * pdev)1724*4882a593Smuzhiyun static void sym2_remove(struct pci_dev *pdev)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	scsi_remove_host(shost);
1729*4882a593Smuzhiyun 	sym_detach(shost, pdev);
1730*4882a593Smuzhiyun 	pci_release_regions(pdev);
1731*4882a593Smuzhiyun 	pci_disable_device(pdev);
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	attach_count--;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun /**
1737*4882a593Smuzhiyun  * sym2_io_error_detected() - called when PCI error is detected
1738*4882a593Smuzhiyun  * @pdev: pointer to PCI device
1739*4882a593Smuzhiyun  * @state: current state of the PCI slot
1740*4882a593Smuzhiyun  */
sym2_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)1741*4882a593Smuzhiyun static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
1742*4882a593Smuzhiyun                                          pci_channel_state_t state)
1743*4882a593Smuzhiyun {
1744*4882a593Smuzhiyun 	/* If slot is permanently frozen, turn everything off */
1745*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure) {
1746*4882a593Smuzhiyun 		sym2_remove(pdev);
1747*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
1748*4882a593Smuzhiyun 	}
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	disable_irq(pdev->irq);
1751*4882a593Smuzhiyun 	pci_disable_device(pdev);
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	/* Request that MMIO be enabled, so register dump can be taken. */
1754*4882a593Smuzhiyun 	return PCI_ERS_RESULT_CAN_RECOVER;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun /**
1758*4882a593Smuzhiyun  * sym2_io_slot_dump - Enable MMIO and dump debug registers
1759*4882a593Smuzhiyun  * @pdev: pointer to PCI device
1760*4882a593Smuzhiyun  */
sym2_io_slot_dump(struct pci_dev * pdev)1761*4882a593Smuzhiyun static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
1762*4882a593Smuzhiyun {
1763*4882a593Smuzhiyun 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	sym_dump_registers(shost);
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	/* Request a slot reset. */
1768*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun /**
1772*4882a593Smuzhiyun  * sym2_reset_workarounds - hardware-specific work-arounds
1773*4882a593Smuzhiyun  * @pdev: pointer to PCI device
1774*4882a593Smuzhiyun  *
1775*4882a593Smuzhiyun  * This routine is similar to sym_set_workarounds(), except
1776*4882a593Smuzhiyun  * that, at this point, we already know that the device was
1777*4882a593Smuzhiyun  * successfully initialized at least once before, and so most
1778*4882a593Smuzhiyun  * of the steps taken there are un-needed here.
1779*4882a593Smuzhiyun  */
sym2_reset_workarounds(struct pci_dev * pdev)1780*4882a593Smuzhiyun static void sym2_reset_workarounds(struct pci_dev *pdev)
1781*4882a593Smuzhiyun {
1782*4882a593Smuzhiyun 	u_short status_reg;
1783*4882a593Smuzhiyun 	struct sym_chip *chip;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	/* Work around for errant bit in 895A, in a fashion
1788*4882a593Smuzhiyun 	 * similar to what is done in sym_set_workarounds().
1789*4882a593Smuzhiyun 	 */
1790*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1791*4882a593Smuzhiyun 	if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) {
1792*4882a593Smuzhiyun 		status_reg = PCI_STATUS_66MHZ;
1793*4882a593Smuzhiyun 		pci_write_config_word(pdev, PCI_STATUS, status_reg);
1794*4882a593Smuzhiyun 		pci_read_config_word(pdev, PCI_STATUS, &status_reg);
1795*4882a593Smuzhiyun 	}
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun /**
1799*4882a593Smuzhiyun  * sym2_io_slot_reset() - called when the pci bus has been reset.
1800*4882a593Smuzhiyun  * @pdev: pointer to PCI device
1801*4882a593Smuzhiyun  *
1802*4882a593Smuzhiyun  * Restart the card from scratch.
1803*4882a593Smuzhiyun  */
static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct sym_hcb *np = sym_get_hcb(shost);

	printk(KERN_INFO "%s: recovering from a PCI slot reset\n",
	          sym_name(np));

	/* Re-enable the function; irq was disabled in error_detected(). */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: Unable to enable after PCI reset\n",
		        sym_name(np));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	enable_irq(pdev->irq);

	/* If the chip can do Memory Write Invalidate, enable it */
	if (np->features & FE_WRIE) {
		if (pci_set_mwi(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform work-arounds, analogous to sym_set_workarounds() */
	sym2_reset_workarounds(pdev);

	/* Perform host reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) == 0) {
		if (sym_reset_scsi_bus(np, 0)) {
			printk(KERN_ERR "%s: Unable to reset scsi host\n",
			        sym_name(np));
			return PCI_ERS_RESULT_DISCONNECT;
		}
		/* Restart SCRIPTS after the bus reset. */
		sym_start_up(shost, 1);
	}

	return PCI_ERS_RESULT_RECOVERED;
}
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun /**
1844*4882a593Smuzhiyun  * sym2_io_resume() - resume normal ops after PCI reset
1845*4882a593Smuzhiyun  * @pdev: pointer to PCI device
1846*4882a593Smuzhiyun  *
1847*4882a593Smuzhiyun  * Called when the error recovery driver tells us that its
1848*4882a593Smuzhiyun  * OK to resume normal operation. Use completion to allow
1849*4882a593Smuzhiyun  * halted scsi ops to resume.
1850*4882a593Smuzhiyun  */
sym2_io_resume(struct pci_dev * pdev)1851*4882a593Smuzhiyun static void sym2_io_resume(struct pci_dev *pdev)
1852*4882a593Smuzhiyun {
1853*4882a593Smuzhiyun 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1854*4882a593Smuzhiyun 	struct sym_data *sym_data = shost_priv(shost);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	spin_lock_irq(shost->host_lock);
1857*4882a593Smuzhiyun 	if (sym_data->io_reset)
1858*4882a593Smuzhiyun 		complete(sym_data->io_reset);
1859*4882a593Smuzhiyun 	spin_unlock_irq(shost->host_lock);
1860*4882a593Smuzhiyun }
1861*4882a593Smuzhiyun 
sym2_get_signalling(struct Scsi_Host * shost)1862*4882a593Smuzhiyun static void sym2_get_signalling(struct Scsi_Host *shost)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(shost);
1865*4882a593Smuzhiyun 	enum spi_signal_type type;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	switch (np->scsi_mode) {
1868*4882a593Smuzhiyun 	case SMODE_SE:
1869*4882a593Smuzhiyun 		type = SPI_SIGNAL_SE;
1870*4882a593Smuzhiyun 		break;
1871*4882a593Smuzhiyun 	case SMODE_LVD:
1872*4882a593Smuzhiyun 		type = SPI_SIGNAL_LVD;
1873*4882a593Smuzhiyun 		break;
1874*4882a593Smuzhiyun 	case SMODE_HVD:
1875*4882a593Smuzhiyun 		type = SPI_SIGNAL_HVD;
1876*4882a593Smuzhiyun 		break;
1877*4882a593Smuzhiyun 	default:
1878*4882a593Smuzhiyun 		type = SPI_SIGNAL_UNKNOWN;
1879*4882a593Smuzhiyun 		break;
1880*4882a593Smuzhiyun 	}
1881*4882a593Smuzhiyun 	spi_signalling(shost) = type;
1882*4882a593Smuzhiyun }
1883*4882a593Smuzhiyun 
sym2_set_offset(struct scsi_target * starget,int offset)1884*4882a593Smuzhiyun static void sym2_set_offset(struct scsi_target *starget, int offset)
1885*4882a593Smuzhiyun {
1886*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1887*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(shost);
1888*4882a593Smuzhiyun 	struct sym_tcb *tp = &np->target[starget->id];
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	tp->tgoal.offset = offset;
1891*4882a593Smuzhiyun 	tp->tgoal.check_nego = 1;
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun 
sym2_set_period(struct scsi_target * starget,int period)1894*4882a593Smuzhiyun static void sym2_set_period(struct scsi_target *starget, int period)
1895*4882a593Smuzhiyun {
1896*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1897*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(shost);
1898*4882a593Smuzhiyun 	struct sym_tcb *tp = &np->target[starget->id];
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	/* have to have DT for these transfers, but DT will also
1901*4882a593Smuzhiyun 	 * set width, so check that this is allowed */
1902*4882a593Smuzhiyun 	if (period <= np->minsync && spi_width(starget))
1903*4882a593Smuzhiyun 		tp->tgoal.dt = 1;
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	tp->tgoal.period = period;
1906*4882a593Smuzhiyun 	tp->tgoal.check_nego = 1;
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun 
sym2_set_width(struct scsi_target * starget,int width)1909*4882a593Smuzhiyun static void sym2_set_width(struct scsi_target *starget, int width)
1910*4882a593Smuzhiyun {
1911*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1912*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(shost);
1913*4882a593Smuzhiyun 	struct sym_tcb *tp = &np->target[starget->id];
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	/* It is illegal to have DT set on narrow transfers.  If DT is
1916*4882a593Smuzhiyun 	 * clear, we must also clear IU and QAS.  */
1917*4882a593Smuzhiyun 	if (width == 0)
1918*4882a593Smuzhiyun 		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	tp->tgoal.width = width;
1921*4882a593Smuzhiyun 	tp->tgoal.check_nego = 1;
1922*4882a593Smuzhiyun }
1923*4882a593Smuzhiyun 
sym2_set_dt(struct scsi_target * starget,int dt)1924*4882a593Smuzhiyun static void sym2_set_dt(struct scsi_target *starget, int dt)
1925*4882a593Smuzhiyun {
1926*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1927*4882a593Smuzhiyun 	struct sym_hcb *np = sym_get_hcb(shost);
1928*4882a593Smuzhiyun 	struct sym_tcb *tp = &np->target[starget->id];
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	/* We must clear QAS and IU if DT is clear */
1931*4882a593Smuzhiyun 	if (dt)
1932*4882a593Smuzhiyun 		tp->tgoal.dt = 1;
1933*4882a593Smuzhiyun 	else
1934*4882a593Smuzhiyun 		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
1935*4882a593Smuzhiyun 	tp->tgoal.check_nego = 1;
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun 
#if 0
/*
 * IU and QAS setters are compiled out; the matching .set_iu/.set_qas
 * entries in sym2_transport_functions below are disabled with the
 * same #if 0.  Kept for reference.
 */
static void sym2_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* IU transfers require DT. */
	if (iu)
		tp->tgoal.iu = tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = 0;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* QAS transfers require DT. */
	if (qas)
		tp->tgoal.dt = tp->tgoal.qas = 1;
	else
		tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}
#endif
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun static struct spi_function_template sym2_transport_functions = {
1967*4882a593Smuzhiyun 	.set_offset	= sym2_set_offset,
1968*4882a593Smuzhiyun 	.show_offset	= 1,
1969*4882a593Smuzhiyun 	.set_period	= sym2_set_period,
1970*4882a593Smuzhiyun 	.show_period	= 1,
1971*4882a593Smuzhiyun 	.set_width	= sym2_set_width,
1972*4882a593Smuzhiyun 	.show_width	= 1,
1973*4882a593Smuzhiyun 	.set_dt		= sym2_set_dt,
1974*4882a593Smuzhiyun 	.show_dt	= 1,
1975*4882a593Smuzhiyun #if 0
1976*4882a593Smuzhiyun 	.set_iu		= sym2_set_iu,
1977*4882a593Smuzhiyun 	.show_iu	= 1,
1978*4882a593Smuzhiyun 	.set_qas	= sym2_set_qas,
1979*4882a593Smuzhiyun 	.show_qas	= 1,
1980*4882a593Smuzhiyun #endif
1981*4882a593Smuzhiyun 	.get_signalling	= sym2_get_signalling,
1982*4882a593Smuzhiyun };
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun static struct pci_device_id sym2_id_table[] = {
1985*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
1986*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1987*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
1988*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
1989*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
1990*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1991*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
1992*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1993*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
1994*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
1995*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
1996*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1997*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
1998*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8,  0xffff00, 0UL },
1999*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
2000*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2001*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
2002*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2003*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
2004*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2005*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
2006*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2007*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
2008*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID,  PCI_CLASS_STORAGE_SCSI<<8,  0xffff00, 0UL }, /* new */
2009*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
2010*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2011*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
2012*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2013*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
2014*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2015*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
2016*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2017*4882a593Smuzhiyun 	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
2018*4882a593Smuzhiyun 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2019*4882a593Smuzhiyun 	{ 0, }
2020*4882a593Smuzhiyun };
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, sym2_id_table);
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun static const struct pci_error_handlers sym2_err_handler = {
2025*4882a593Smuzhiyun 	.error_detected	= sym2_io_error_detected,
2026*4882a593Smuzhiyun 	.mmio_enabled	= sym2_io_slot_dump,
2027*4882a593Smuzhiyun 	.slot_reset	= sym2_io_slot_reset,
2028*4882a593Smuzhiyun 	.resume		= sym2_io_resume,
2029*4882a593Smuzhiyun };
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun static struct pci_driver sym2_driver = {
2032*4882a593Smuzhiyun 	.name		= NAME53C8XX,
2033*4882a593Smuzhiyun 	.id_table	= sym2_id_table,
2034*4882a593Smuzhiyun 	.probe		= sym2_probe,
2035*4882a593Smuzhiyun 	.remove		= sym2_remove,
2036*4882a593Smuzhiyun 	.err_handler 	= &sym2_err_handler,
2037*4882a593Smuzhiyun };
2038*4882a593Smuzhiyun 
sym2_init(void)2039*4882a593Smuzhiyun static int __init sym2_init(void)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun 	int error;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	sym2_setup_params();
2044*4882a593Smuzhiyun 	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
2045*4882a593Smuzhiyun 	if (!sym2_transport_template)
2046*4882a593Smuzhiyun 		return -ENODEV;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	error = pci_register_driver(&sym2_driver);
2049*4882a593Smuzhiyun 	if (error)
2050*4882a593Smuzhiyun 		spi_release_transport(sym2_transport_template);
2051*4882a593Smuzhiyun 	return error;
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun 
/*
 * Module exit point.  The PCI driver must be unregistered first so all
 * host instances are torn down before the transport template they
 * reference is released.
 */
static void __exit sym2_exit(void)
{
	pci_unregister_driver(&sym2_driver);
	spi_release_transport(sym2_transport_template);
}

module_init(sym2_init);
module_exit(sym2_exit);
2062