1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Adaptec AIC79xx device driver for Linux.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * --------------------------------------------------------------------------
7*4882a593Smuzhiyun * Copyright (c) 1994-2000 Justin T. Gibbs.
8*4882a593Smuzhiyun * Copyright (c) 1997-1999 Doug Ledford
9*4882a593Smuzhiyun * Copyright (c) 2000-2003 Adaptec Inc.
10*4882a593Smuzhiyun * All rights reserved.
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
13*4882a593Smuzhiyun * modification, are permitted provided that the following conditions
14*4882a593Smuzhiyun * are met:
15*4882a593Smuzhiyun * 1. Redistributions of source code must retain the above copyright
16*4882a593Smuzhiyun * notice, this list of conditions, and the following disclaimer,
17*4882a593Smuzhiyun * without modification.
18*4882a593Smuzhiyun * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19*4882a593Smuzhiyun * substantially similar to the "NO WARRANTY" disclaimer below
20*4882a593Smuzhiyun * ("Disclaimer") and any redistribution must be conditioned upon
21*4882a593Smuzhiyun * including a substantially similar Disclaimer requirement for further
22*4882a593Smuzhiyun * binary redistribution.
23*4882a593Smuzhiyun * 3. Neither the names of the above-listed copyright holders nor the names
24*4882a593Smuzhiyun * of any contributors may be used to endorse or promote products derived
25*4882a593Smuzhiyun * from this software without specific prior written permission.
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun * Alternatively, this software may be distributed under the terms of the
28*4882a593Smuzhiyun * GNU General Public License ("GPL") version 2 as published by the Free
29*4882a593Smuzhiyun * Software Foundation.
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * NO WARRANTY
32*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33*4882a593Smuzhiyun * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34*4882a593Smuzhiyun * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35*4882a593Smuzhiyun * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36*4882a593Smuzhiyun * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37*4882a593Smuzhiyun * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38*4882a593Smuzhiyun * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39*4882a593Smuzhiyun * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40*4882a593Smuzhiyun * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41*4882a593Smuzhiyun * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42*4882a593Smuzhiyun * POSSIBILITY OF SUCH DAMAGES.
43*4882a593Smuzhiyun */
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #include "aic79xx_osm.h"
46*4882a593Smuzhiyun #include "aic79xx_inline.h"
47*4882a593Smuzhiyun #include <scsi/scsicam.h>
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun static struct scsi_transport_template *ahd_linux_transport_template = NULL;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun #include <linux/init.h> /* __setup */
52*4882a593Smuzhiyun #include <linux/mm.h> /* For fetching system memory size */
53*4882a593Smuzhiyun #include <linux/blkdev.h> /* For block_size() */
54*4882a593Smuzhiyun #include <linux/delay.h> /* For ssleep/msleep */
55*4882a593Smuzhiyun #include <linux/device.h>
56*4882a593Smuzhiyun #include <linux/slab.h>
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun /*
59*4882a593Smuzhiyun * Bucket size for counting good commands in between bad ones.
60*4882a593Smuzhiyun */
61*4882a593Smuzhiyun #define AHD_LINUX_ERR_THRESH 1000
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun /*
64*4882a593Smuzhiyun * Set this to the delay in seconds after SCSI bus reset.
65*4882a593Smuzhiyun * Note, we honor this only for the initial bus reset.
66*4882a593Smuzhiyun * The scsi error recovery code performs its own bus settle
67*4882a593Smuzhiyun * delay handling for error recovery actions.
68*4882a593Smuzhiyun */
69*4882a593Smuzhiyun #ifdef CONFIG_AIC79XX_RESET_DELAY_MS
70*4882a593Smuzhiyun #define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
71*4882a593Smuzhiyun #else
72*4882a593Smuzhiyun #define AIC79XX_RESET_DELAY 5000
73*4882a593Smuzhiyun #endif
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun /*
76*4882a593Smuzhiyun * To change the default number of tagged transactions allowed per-device,
77*4882a593Smuzhiyun * add a line to the lilo.conf file like:
78*4882a593Smuzhiyun * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
79*4882a593Smuzhiyun * which will result in the first four devices on the first two
80*4882a593Smuzhiyun * controllers being set to a tagged queue depth of 32.
81*4882a593Smuzhiyun *
82*4882a593Smuzhiyun * The tag_commands is an array of 16 to allow for wide and twin adapters.
83*4882a593Smuzhiyun * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
84*4882a593Smuzhiyun * for channel 1.
85*4882a593Smuzhiyun */
86*4882a593Smuzhiyun typedef struct {
87*4882a593Smuzhiyun uint16_t tag_commands[16]; /* Allow for wide/twin adapters. */
88*4882a593Smuzhiyun } adapter_tag_info_t;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun /*
91*4882a593Smuzhiyun * Modify this as you see fit for your system.
92*4882a593Smuzhiyun *
93*4882a593Smuzhiyun * 0 tagged queuing disabled
94*4882a593Smuzhiyun * 1 <= n <= 253 n == max tags ever dispatched.
95*4882a593Smuzhiyun *
96*4882a593Smuzhiyun * The driver will throttle the number of commands dispatched to a
97*4882a593Smuzhiyun * device if it returns queue full. For devices with a fixed maximum
98*4882a593Smuzhiyun * queue depth, the driver will eventually determine this depth and
99*4882a593Smuzhiyun * lock it in (a console message is printed to indicate that a lock
100*4882a593Smuzhiyun * has occurred). On some devices, queue full is returned for a temporary
101*4882a593Smuzhiyun * resource shortage. These devices will return queue full at varying
102*4882a593Smuzhiyun * depths. The driver will throttle back when the queue fulls occur and
103*4882a593Smuzhiyun * attempt to slowly increase the depth over time as the device recovers
104*4882a593Smuzhiyun * from the resource shortage.
105*4882a593Smuzhiyun *
106*4882a593Smuzhiyun * In this example, the first line will disable tagged queueing for all
107*4882a593Smuzhiyun * the devices on the first probed aic79xx adapter.
108*4882a593Smuzhiyun *
109*4882a593Smuzhiyun * The second line enables tagged queueing with 4 commands/LUN for IDs
110*4882a593Smuzhiyun * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
111*4882a593Smuzhiyun * driver to attempt to use up to 64 tags for ID 1.
112*4882a593Smuzhiyun *
113*4882a593Smuzhiyun * The third line is the same as the first line.
114*4882a593Smuzhiyun *
115*4882a593Smuzhiyun * The fourth line disables tagged queueing for devices 0 and 3. It
116*4882a593Smuzhiyun * enables tagged queueing for the other IDs, with 16 commands/LUN
117*4882a593Smuzhiyun * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
118*4882a593Smuzhiyun * IDs 2, 5-7, and 9-15.
119*4882a593Smuzhiyun */
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun /*
122*4882a593Smuzhiyun * NOTE: The below structure is for reference only, the actual structure
123*4882a593Smuzhiyun * to modify in order to change things is just below this comment block.
124*4882a593Smuzhiyun adapter_tag_info_t aic79xx_tag_info[] =
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
127*4882a593Smuzhiyun {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
128*4882a593Smuzhiyun {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
129*4882a593Smuzhiyun {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
130*4882a593Smuzhiyun };
131*4882a593Smuzhiyun */
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun #ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
134*4882a593Smuzhiyun #define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
135*4882a593Smuzhiyun #else
136*4882a593Smuzhiyun #define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
137*4882a593Smuzhiyun #endif
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun #define AIC79XX_CONFIGED_TAG_COMMANDS { \
140*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
141*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
142*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
143*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
144*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
145*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
146*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
147*4882a593Smuzhiyun AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun /*
151*4882a593Smuzhiyun * By default, use the number of commands specified by
152*4882a593Smuzhiyun * the users kernel configuration.
153*4882a593Smuzhiyun */
/*
 * Per-controller tag-depth tables.  One entry per probed aic79xx adapter
 * (up to 16 controllers); each entry holds 16 per-target tag depths, all
 * defaulting to the kernel-configured AIC79XX_CMDS_PER_DEVICE.  May be
 * overridden at boot/modprobe time via the "tag_info" option string.
 */
static adapter_tag_info_t aic79xx_tag_info[] =
{
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS}
};
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /*
175*4882a593Smuzhiyun * The I/O cell on the chip is very configurable in respect to its analog
176*4882a593Smuzhiyun * characteristics. Set the defaults here; they can be overriden with
177*4882a593Smuzhiyun * the proper insmod parameters.
178*4882a593Smuzhiyun */
/*
 * Per-controller I/O cell tuning parameters.  A value of 0xFF means
 * "leave the chip/SEEPROM default"; anything else is written to the
 * corresponding I/O cell register.  Overridable via the "precomp",
 * "slewrate" and "amplitude" insmod options.
 */
struct ahd_linux_iocell_opts
{
	uint8_t	precomp;	/* signal precompensation (0-7, 0xFF = default) */
	uint8_t	slewrate;	/* signal slew rate (0-15, 0xFF = default) */
	uint8_t	amplitude;	/* signal amplitude (0-7, 0xFF = default) */
};
#define AIC79XX_DEFAULT_PRECOMP		0xFF
#define AIC79XX_DEFAULT_SLEWRATE	0xFF
#define AIC79XX_DEFAULT_AMPLITUDE	0xFF
#define AIC79XX_DEFAULT_IOOPTS			\
{						\
	AIC79XX_DEFAULT_PRECOMP,		\
	AIC79XX_DEFAULT_SLEWRATE,		\
	AIC79XX_DEFAULT_AMPLITUDE		\
}
/* Indexes used when an option string supplies a list of cell values. */
#define AIC79XX_PRECOMP_INDEX	0
#define AIC79XX_SLEWRATE_INDEX	1
#define AIC79XX_AMPLITUDE_INDEX	2
/* One entry per probed controller (up to 16), all chip defaults. */
static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
{
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS
};
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /*
218*4882a593Smuzhiyun * There should be a specific return value for this in scsi.h, but
219*4882a593Smuzhiyun * it seems that most drivers ignore it.
220*4882a593Smuzhiyun */
221*4882a593Smuzhiyun #define DID_UNDERFLOW DID_ERROR
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun void
ahd_print_path(struct ahd_softc * ahd,struct scb * scb)224*4882a593Smuzhiyun ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun printk("(scsi%d:%c:%d:%d): ",
227*4882a593Smuzhiyun ahd->platform_data->host->host_no,
228*4882a593Smuzhiyun scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X',
229*4882a593Smuzhiyun scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1,
230*4882a593Smuzhiyun scb != NULL ? SCB_GET_LUN(scb) : -1);
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun /*
234*4882a593Smuzhiyun * XXX - these options apply unilaterally to _all_ adapters
235*4882a593Smuzhiyun * cards in the system. This should be fixed. Exceptions to this
236*4882a593Smuzhiyun * rule are noted in the comments.
237*4882a593Smuzhiyun */
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun /*
240*4882a593Smuzhiyun * Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
241*4882a593Smuzhiyun * has no effect on any later resets that might occur due to things like
242*4882a593Smuzhiyun * SCSI bus timeouts.
243*4882a593Smuzhiyun */
244*4882a593Smuzhiyun static uint32_t aic79xx_no_reset;
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun /*
247*4882a593Smuzhiyun * Should we force EXTENDED translation on a controller.
248*4882a593Smuzhiyun * 0 == Use whatever is in the SEEPROM or default to off
249*4882a593Smuzhiyun * 1 == Use whatever is in the SEEPROM or default to on
250*4882a593Smuzhiyun */
251*4882a593Smuzhiyun static uint32_t aic79xx_extended;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun /*
254*4882a593Smuzhiyun * PCI bus parity checking of the Adaptec controllers. This is somewhat
255*4882a593Smuzhiyun * dubious at best. To my knowledge, this option has never actually
256*4882a593Smuzhiyun * solved a PCI parity problem, but on certain machines with broken PCI
257*4882a593Smuzhiyun * chipset configurations, it can generate tons of false error messages.
258*4882a593Smuzhiyun * It's included in the driver for completeness.
259*4882a593Smuzhiyun * 0 = Shut off PCI parity check
260*4882a593Smuzhiyun * non-0 = Enable PCI parity check
261*4882a593Smuzhiyun *
262*4882a593Smuzhiyun * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
263*4882a593Smuzhiyun * variable to -1 you would actually want to simply pass the variable
264*4882a593Smuzhiyun * name without a number. That will invert the 0 which will result in
265*4882a593Smuzhiyun * -1.
266*4882a593Smuzhiyun */
267*4882a593Smuzhiyun static uint32_t aic79xx_pci_parity = ~0;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun /*
270*4882a593Smuzhiyun * There are lots of broken chipsets in the world. Some of them will
271*4882a593Smuzhiyun * violate the PCI spec when we issue byte sized memory writes to our
272*4882a593Smuzhiyun * controller. I/O mapped register access, if allowed by the given
273*4882a593Smuzhiyun * platform, will work in almost all cases.
274*4882a593Smuzhiyun */
275*4882a593Smuzhiyun uint32_t aic79xx_allow_memio = ~0;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun /*
278*4882a593Smuzhiyun * So that we can set how long each device is given as a selection timeout.
279*4882a593Smuzhiyun * The table of values goes like this:
280*4882a593Smuzhiyun * 0 - 256ms
281*4882a593Smuzhiyun * 1 - 128ms
282*4882a593Smuzhiyun * 2 - 64ms
283*4882a593Smuzhiyun * 3 - 32ms
284*4882a593Smuzhiyun * We default to 256ms because some older devices need a longer time
285*4882a593Smuzhiyun * to respond to initial selection.
286*4882a593Smuzhiyun */
287*4882a593Smuzhiyun static uint32_t aic79xx_seltime;
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /*
290*4882a593Smuzhiyun * Certain devices do not perform any aging on commands. Should the
291*4882a593Smuzhiyun * device be saturated by commands in one portion of the disk, it is
292*4882a593Smuzhiyun * possible for transactions on far away sectors to never be serviced.
293*4882a593Smuzhiyun * To handle these devices, we can periodically send an ordered tag to
294*4882a593Smuzhiyun * force all outstanding transactions to be serviced prior to a new
295*4882a593Smuzhiyun * transaction.
296*4882a593Smuzhiyun */
297*4882a593Smuzhiyun static uint32_t aic79xx_periodic_otag;
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /* Some storage boxes are using an LSI chip which has a bug making it
300*4882a593Smuzhiyun * impossible to use aic79xx Rev B chip in 320 speeds. The following
301*4882a593Smuzhiyun * storage boxes have been reported to be buggy:
302*4882a593Smuzhiyun * EonStor 3U 16-Bay: U16U-G3A3
303*4882a593Smuzhiyun * EonStor 2U 12-Bay: U12U-G3A3
304*4882a593Smuzhiyun * SentinelRAID: 2500F R5 / R6
305*4882a593Smuzhiyun * SentinelRAID: 2500F R1
306*4882a593Smuzhiyun * SentinelRAID: 2500F/1500F
307*4882a593Smuzhiyun * SentinelRAID: 150F
308*4882a593Smuzhiyun *
309*4882a593Smuzhiyun * To get around this LSI bug, you can set your board to 160 mode
310*4882a593Smuzhiyun * or you can enable the SLOWCRC bit.
311*4882a593Smuzhiyun */
312*4882a593Smuzhiyun uint32_t aic79xx_slowcrc;
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun /*
315*4882a593Smuzhiyun * Module information and settable options.
316*4882a593Smuzhiyun */
317*4882a593Smuzhiyun static char *aic79xx = NULL;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
320*4882a593Smuzhiyun MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver");
321*4882a593Smuzhiyun MODULE_LICENSE("Dual BSD/GPL");
322*4882a593Smuzhiyun MODULE_VERSION(AIC79XX_DRIVER_VERSION);
323*4882a593Smuzhiyun module_param(aic79xx, charp, 0444);
324*4882a593Smuzhiyun MODULE_PARM_DESC(aic79xx,
325*4882a593Smuzhiyun "period-delimited options string:\n"
326*4882a593Smuzhiyun " verbose Enable verbose/diagnostic logging\n"
327*4882a593Smuzhiyun " allow_memio Allow device registers to be memory mapped\n"
328*4882a593Smuzhiyun " debug Bitmask of debug values to enable\n"
329*4882a593Smuzhiyun " no_reset Suppress initial bus resets\n"
330*4882a593Smuzhiyun " extended Enable extended geometry on all controllers\n"
331*4882a593Smuzhiyun " periodic_otag Send an ordered tagged transaction\n"
332*4882a593Smuzhiyun " periodically to prevent tag starvation.\n"
333*4882a593Smuzhiyun " This may be required by some older disk\n"
334*4882a593Smuzhiyun " or drives/RAID arrays.\n"
335*4882a593Smuzhiyun " tag_info:<tag_str> Set per-target tag depth\n"
336*4882a593Smuzhiyun " global_tag_depth:<int> Global tag depth for all targets on all buses\n"
337*4882a593Smuzhiyun " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
338*4882a593Smuzhiyun " precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
339*4882a593Smuzhiyun " amplitude:<int> Set the signal amplitude (0-7).\n"
340*4882a593Smuzhiyun " seltime:<int> Selection Timeout:\n"
341*4882a593Smuzhiyun " (0/256ms,1/128ms,2/64ms,3/32ms)\n"
342*4882a593Smuzhiyun " slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
343*4882a593Smuzhiyun "\n"
344*4882a593Smuzhiyun " Sample modprobe configuration file:\n"
345*4882a593Smuzhiyun " # Enable verbose logging\n"
346*4882a593Smuzhiyun " # Set tag depth on Controller 2/Target 2 to 10 tags\n"
347*4882a593Smuzhiyun " # Shorten the selection timeout to 128ms\n"
348*4882a593Smuzhiyun "\n"
349*4882a593Smuzhiyun " options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
350*4882a593Smuzhiyun );
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun static void ahd_linux_handle_scsi_status(struct ahd_softc *,
353*4882a593Smuzhiyun struct scsi_device *,
354*4882a593Smuzhiyun struct scb *);
355*4882a593Smuzhiyun static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
356*4882a593Smuzhiyun struct scsi_cmnd *cmd);
357*4882a593Smuzhiyun static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd);
358*4882a593Smuzhiyun static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
359*4882a593Smuzhiyun static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
360*4882a593Smuzhiyun struct ahd_devinfo *devinfo);
361*4882a593Smuzhiyun static void ahd_linux_device_queue_depth(struct scsi_device *);
362*4882a593Smuzhiyun static int ahd_linux_run_command(struct ahd_softc*,
363*4882a593Smuzhiyun struct ahd_linux_device *,
364*4882a593Smuzhiyun struct scsi_cmnd *);
365*4882a593Smuzhiyun static void ahd_linux_setup_tag_info_global(char *p);
366*4882a593Smuzhiyun static int aic79xx_setup(char *c);
367*4882a593Smuzhiyun static void ahd_freeze_simq(struct ahd_softc *ahd);
368*4882a593Smuzhiyun static void ahd_release_simq(struct ahd_softc *ahd);
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun static int ahd_linux_unit;
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun /************************** OS Utility Wrappers *******************************/
void ahd_delay(long);
/*
 * Busy-wait for @usec microseconds.
 *
 * udelay on Linux can misbehave for multi-millisecond waits, so the
 * delay is issued in chunks of at most 1024us.
 *
 * Bug fix: the original issued udelay(usec % 1024), which delays 0us
 * whenever the remaining time is an exact multiple of 1024 and in
 * general undershoots the requested total (e.g. 1500us only slept
 * ~952us).  Clamp each chunk to min(usec, 1024) instead.
 */
void
ahd_delay(long usec)
{
	while (usec > 0) {
		udelay(usec > 1024 ? 1024 : usec);
		usec -= 1024;
	}
}
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun /***************************** Low Level I/O **********************************/
391*4882a593Smuzhiyun uint8_t ahd_inb(struct ahd_softc * ahd, long port);
392*4882a593Smuzhiyun void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
393*4882a593Smuzhiyun void ahd_outw_atomic(struct ahd_softc * ahd,
394*4882a593Smuzhiyun long port, uint16_t val);
395*4882a593Smuzhiyun void ahd_outsb(struct ahd_softc * ahd, long port,
396*4882a593Smuzhiyun uint8_t *, int count);
397*4882a593Smuzhiyun void ahd_insb(struct ahd_softc * ahd, long port,
398*4882a593Smuzhiyun uint8_t *, int count);
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun uint8_t
ahd_inb(struct ahd_softc * ahd,long port)401*4882a593Smuzhiyun ahd_inb(struct ahd_softc * ahd, long port)
402*4882a593Smuzhiyun {
403*4882a593Smuzhiyun uint8_t x;
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun if (ahd->tags[0] == BUS_SPACE_MEMIO) {
406*4882a593Smuzhiyun x = readb(ahd->bshs[0].maddr + port);
407*4882a593Smuzhiyun } else {
408*4882a593Smuzhiyun x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun mb();
411*4882a593Smuzhiyun return (x);
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun
#if 0 /* unused */
/*
 * Read one 16-bit word from a controller register.
 *
 * Bug fix: the local was declared uint8_t, truncating the upper byte
 * of the 16-bit readw()/inw() result; it must be uint16_t.
 */
static uint16_t
ahd_inw_atomic(struct ahd_softc * ahd, long port)
{
	uint16_t x;

	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		x = readw(ahd->bshs[0].maddr + port);
	} else {
		x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
	}
	mb();
	return (x);
}
#endif
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun void
ahd_outb(struct ahd_softc * ahd,long port,uint8_t val)431*4882a593Smuzhiyun ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
432*4882a593Smuzhiyun {
433*4882a593Smuzhiyun if (ahd->tags[0] == BUS_SPACE_MEMIO) {
434*4882a593Smuzhiyun writeb(val, ahd->bshs[0].maddr + port);
435*4882a593Smuzhiyun } else {
436*4882a593Smuzhiyun outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun mb();
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun void
ahd_outw_atomic(struct ahd_softc * ahd,long port,uint16_t val)442*4882a593Smuzhiyun ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
443*4882a593Smuzhiyun {
444*4882a593Smuzhiyun if (ahd->tags[0] == BUS_SPACE_MEMIO) {
445*4882a593Smuzhiyun writew(val, ahd->bshs[0].maddr + port);
446*4882a593Smuzhiyun } else {
447*4882a593Smuzhiyun outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
448*4882a593Smuzhiyun }
449*4882a593Smuzhiyun mb();
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun void
ahd_outsb(struct ahd_softc * ahd,long port,uint8_t * array,int count)453*4882a593Smuzhiyun ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
454*4882a593Smuzhiyun {
455*4882a593Smuzhiyun int i;
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun /*
458*4882a593Smuzhiyun * There is probably a more efficient way to do this on Linux
459*4882a593Smuzhiyun * but we don't use this for anything speed critical and this
460*4882a593Smuzhiyun * should work.
461*4882a593Smuzhiyun */
462*4882a593Smuzhiyun for (i = 0; i < count; i++)
463*4882a593Smuzhiyun ahd_outb(ahd, port, *array++);
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun void
ahd_insb(struct ahd_softc * ahd,long port,uint8_t * array,int count)467*4882a593Smuzhiyun ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
468*4882a593Smuzhiyun {
469*4882a593Smuzhiyun int i;
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun /*
472*4882a593Smuzhiyun * There is probably a more efficient way to do this on Linux
473*4882a593Smuzhiyun * but we don't use this for anything speed critical and this
474*4882a593Smuzhiyun * should work.
475*4882a593Smuzhiyun */
476*4882a593Smuzhiyun for (i = 0; i < count; i++)
477*4882a593Smuzhiyun *array++ = ahd_inb(ahd, port);
478*4882a593Smuzhiyun }
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun /******************************* PCI Routines *********************************/
481*4882a593Smuzhiyun uint32_t
ahd_pci_read_config(ahd_dev_softc_t pci,int reg,int width)482*4882a593Smuzhiyun ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
483*4882a593Smuzhiyun {
484*4882a593Smuzhiyun switch (width) {
485*4882a593Smuzhiyun case 1:
486*4882a593Smuzhiyun {
487*4882a593Smuzhiyun uint8_t retval;
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun pci_read_config_byte(pci, reg, &retval);
490*4882a593Smuzhiyun return (retval);
491*4882a593Smuzhiyun }
492*4882a593Smuzhiyun case 2:
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun uint16_t retval;
495*4882a593Smuzhiyun pci_read_config_word(pci, reg, &retval);
496*4882a593Smuzhiyun return (retval);
497*4882a593Smuzhiyun }
498*4882a593Smuzhiyun case 4:
499*4882a593Smuzhiyun {
500*4882a593Smuzhiyun uint32_t retval;
501*4882a593Smuzhiyun pci_read_config_dword(pci, reg, &retval);
502*4882a593Smuzhiyun return (retval);
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun default:
505*4882a593Smuzhiyun panic("ahd_pci_read_config: Read size too big");
506*4882a593Smuzhiyun /* NOTREACHED */
507*4882a593Smuzhiyun return (0);
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun void
ahd_pci_write_config(ahd_dev_softc_t pci,int reg,uint32_t value,int width)512*4882a593Smuzhiyun ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
513*4882a593Smuzhiyun {
514*4882a593Smuzhiyun switch (width) {
515*4882a593Smuzhiyun case 1:
516*4882a593Smuzhiyun pci_write_config_byte(pci, reg, value);
517*4882a593Smuzhiyun break;
518*4882a593Smuzhiyun case 2:
519*4882a593Smuzhiyun pci_write_config_word(pci, reg, value);
520*4882a593Smuzhiyun break;
521*4882a593Smuzhiyun case 4:
522*4882a593Smuzhiyun pci_write_config_dword(pci, reg, value);
523*4882a593Smuzhiyun break;
524*4882a593Smuzhiyun default:
525*4882a593Smuzhiyun panic("ahd_pci_write_config: Write size too big");
526*4882a593Smuzhiyun /* NOTREACHED */
527*4882a593Smuzhiyun }
528*4882a593Smuzhiyun }
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun /****************************** Inlines ***************************************/
531*4882a593Smuzhiyun static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun static void
ahd_linux_unmap_scb(struct ahd_softc * ahd,struct scb * scb)534*4882a593Smuzhiyun ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
535*4882a593Smuzhiyun {
536*4882a593Smuzhiyun struct scsi_cmnd *cmd;
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun cmd = scb->io_ctx;
539*4882a593Smuzhiyun ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
540*4882a593Smuzhiyun scsi_dma_unmap(cmd);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun /******************************** Macros **************************************/
544*4882a593Smuzhiyun #define BUILD_SCSIID(ahd, cmd) \
545*4882a593Smuzhiyun (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun /*
548*4882a593Smuzhiyun * Return a string describing the driver.
549*4882a593Smuzhiyun */
550*4882a593Smuzhiyun static const char *
ahd_linux_info(struct Scsi_Host * host)551*4882a593Smuzhiyun ahd_linux_info(struct Scsi_Host *host)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun static char buffer[512];
554*4882a593Smuzhiyun char ahd_info[256];
555*4882a593Smuzhiyun char *bp;
556*4882a593Smuzhiyun struct ahd_softc *ahd;
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun bp = &buffer[0];
559*4882a593Smuzhiyun ahd = *(struct ahd_softc **)host->hostdata;
560*4882a593Smuzhiyun memset(bp, 0, sizeof(buffer));
561*4882a593Smuzhiyun strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
562*4882a593Smuzhiyun " <");
563*4882a593Smuzhiyun strcat(bp, ahd->description);
564*4882a593Smuzhiyun strcat(bp, ">\n"
565*4882a593Smuzhiyun " ");
566*4882a593Smuzhiyun ahd_controller_info(ahd, ahd_info);
567*4882a593Smuzhiyun strcat(bp, ahd_info);
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun return (bp);
570*4882a593Smuzhiyun }
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun /*
573*4882a593Smuzhiyun * Queue an SCB to the controller.
574*4882a593Smuzhiyun */
575*4882a593Smuzhiyun static int
ahd_linux_queue_lck(struct scsi_cmnd * cmd,void (* scsi_done)(struct scsi_cmnd *))576*4882a593Smuzhiyun ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
577*4882a593Smuzhiyun {
578*4882a593Smuzhiyun struct ahd_softc *ahd;
579*4882a593Smuzhiyun struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
580*4882a593Smuzhiyun int rtn = SCSI_MLQUEUE_HOST_BUSY;
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun cmd->scsi_done = scsi_done;
585*4882a593Smuzhiyun cmd->result = CAM_REQ_INPROG << 16;
586*4882a593Smuzhiyun rtn = ahd_linux_run_command(ahd, dev, cmd);
587*4882a593Smuzhiyun
588*4882a593Smuzhiyun return rtn;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun
DEF_SCSI_QCMD(ahd_linux_queue)591*4882a593Smuzhiyun static DEF_SCSI_QCMD(ahd_linux_queue)
592*4882a593Smuzhiyun
593*4882a593Smuzhiyun static struct scsi_target **
594*4882a593Smuzhiyun ahd_linux_target_in_softc(struct scsi_target *starget)
595*4882a593Smuzhiyun {
596*4882a593Smuzhiyun struct ahd_softc *ahd =
597*4882a593Smuzhiyun *((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
598*4882a593Smuzhiyun unsigned int target_offset;
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun target_offset = starget->id;
601*4882a593Smuzhiyun if (starget->channel != 0)
602*4882a593Smuzhiyun target_offset += 8;
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun return &ahd->platform_data->starget[target_offset];
605*4882a593Smuzhiyun }
606*4882a593Smuzhiyun
/*
 * SCSI midlayer target_alloc hook.  Records the new scsi_target in the
 * softc, seeds the SPI transport limits from the SEEPROM (when present),
 * and sets the initial negotiation goal to async/narrow.  Always
 * returns 0.
 */
static int
ahd_linux_target_alloc(struct scsi_target *starget)
{
	struct ahd_softc *ahd =
		*((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
	struct seeprom_config *sc = ahd->seep_config;
	unsigned long flags;
	struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
	struct ahd_devinfo devinfo;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	char channel = starget->channel + 'A';

	ahd_lock(ahd, &flags);

	/* A duplicate allocation for the same slot is a driver bug. */
	BUG_ON(*ahd_targp != NULL);

	*ahd_targp = starget;

	if (sc) {
		/* Limit the transport caps from the SEEPROM device flags. */
		int flags = sc->device_flags[starget->id];

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    starget->id, &tstate);

		if ((flags & CFPACKETIZED) == 0) {
			/* don't negotiate packetized (IU) transfers */
			spi_max_iu(starget) = 0;
		} else {
			/* Retain Training only if the chip supports it. */
			if ((ahd->features & AHD_RTI) == 0)
				spi_rti(starget) = 0;
		}

		if ((flags & CFQAS) == 0)
			spi_max_qas(starget) = 0;

		/* Transinfo values have been set to BIOS settings */
		spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
		spi_min_period(starget) = tinfo->user.period;
		spi_max_offset(starget) = tinfo->user.offset;
	}

	/* Set the negotiation goal to async, 8-bit transfers. */
	tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
				    starget->id, &tstate);
	ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id,
			    CAM_LUN_WILDCARD, channel,
			    ROLE_INITIATOR);
	ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
			 AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_unlock(ahd, &flags);

	return 0;
}
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun static void
ahd_linux_target_destroy(struct scsi_target * starget)664*4882a593Smuzhiyun ahd_linux_target_destroy(struct scsi_target *starget)
665*4882a593Smuzhiyun {
666*4882a593Smuzhiyun struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
667*4882a593Smuzhiyun
668*4882a593Smuzhiyun *ahd_targp = NULL;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun static int
ahd_linux_slave_alloc(struct scsi_device * sdev)672*4882a593Smuzhiyun ahd_linux_slave_alloc(struct scsi_device *sdev)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun struct ahd_softc *ahd =
675*4882a593Smuzhiyun *((struct ahd_softc **)sdev->host->hostdata);
676*4882a593Smuzhiyun struct ahd_linux_device *dev;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun if (bootverbose)
679*4882a593Smuzhiyun printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun dev = scsi_transport_device_data(sdev);
682*4882a593Smuzhiyun memset(dev, 0, sizeof(*dev));
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun /*
685*4882a593Smuzhiyun * We start out life using untagged
686*4882a593Smuzhiyun * transactions of which we allow one.
687*4882a593Smuzhiyun */
688*4882a593Smuzhiyun dev->openings = 1;
689*4882a593Smuzhiyun
690*4882a593Smuzhiyun /*
691*4882a593Smuzhiyun * Set maxtags to 0. This will be changed if we
692*4882a593Smuzhiyun * later determine that we are dealing with
693*4882a593Smuzhiyun * a tagged queuing capable device.
694*4882a593Smuzhiyun */
695*4882a593Smuzhiyun dev->maxtags = 0;
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun return (0);
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun
/*
 * SCSI midlayer slave_configure hook.  Sets the device's queue depth
 * and kicks off SPI Domain Validation the first time the device is
 * configured.  Always returns 0.
 */
static int
ahd_linux_slave_configure(struct scsi_device *sdev)
{
	if (bootverbose)
		sdev_printk(KERN_INFO, sdev, "Slave Configure\n");

	ahd_linux_device_queue_depth(sdev);

	/* Initial Domain Validation */
	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun #if defined(__i386__)
716*4882a593Smuzhiyun /*
717*4882a593Smuzhiyun * Return the disk geometry for the given SCSI device.
718*4882a593Smuzhiyun */
719*4882a593Smuzhiyun static int
ahd_linux_biosparam(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int geom[])720*4882a593Smuzhiyun ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
721*4882a593Smuzhiyun sector_t capacity, int geom[])
722*4882a593Smuzhiyun {
723*4882a593Smuzhiyun int heads;
724*4882a593Smuzhiyun int sectors;
725*4882a593Smuzhiyun int cylinders;
726*4882a593Smuzhiyun int extended;
727*4882a593Smuzhiyun struct ahd_softc *ahd;
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun ahd = *((struct ahd_softc **)sdev->host->hostdata);
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun if (scsi_partsize(bdev, capacity, geom))
732*4882a593Smuzhiyun return 0;
733*4882a593Smuzhiyun
734*4882a593Smuzhiyun heads = 64;
735*4882a593Smuzhiyun sectors = 32;
736*4882a593Smuzhiyun cylinders = aic_sector_div(capacity, heads, sectors);
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun if (aic79xx_extended != 0)
739*4882a593Smuzhiyun extended = 1;
740*4882a593Smuzhiyun else
741*4882a593Smuzhiyun extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
742*4882a593Smuzhiyun if (extended && cylinders >= 1024) {
743*4882a593Smuzhiyun heads = 255;
744*4882a593Smuzhiyun sectors = 63;
745*4882a593Smuzhiyun cylinders = aic_sector_div(capacity, heads, sectors);
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun geom[0] = heads;
748*4882a593Smuzhiyun geom[1] = sectors;
749*4882a593Smuzhiyun geom[2] = cylinders;
750*4882a593Smuzhiyun return (0);
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun #endif
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun /*
755*4882a593Smuzhiyun * Abort the current SCSI command(s).
756*4882a593Smuzhiyun */
/*
 * Abort the current SCSI command(s): delegate to the common
 * recovery path and hand back its SUCCESS/FAILED verdict.
 */
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
	return ahd_linux_queue_abort_cmd(cmd);
}
766*4882a593Smuzhiyun
767*4882a593Smuzhiyun /*
768*4882a593Smuzhiyun * Attempt to send a target reset message to the device that timed out.
769*4882a593Smuzhiyun */
/*
 * Attempt to send a target reset message to the device that timed out.
 *
 * Builds a recovery SCB carrying a LUN-reset task management request,
 * queues it to the controller, and waits up to five seconds for the
 * completion handler to signal eh_done.  Returns SUCCESS when the reset
 * completes in time, FAILED on SCB exhaustion or timeout.
 */
static int
ahd_linux_dev_reset(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	struct ahd_linux_device *dev;
	struct scb *reset_scb;
	u_int cdb_byte;
	int retval = SUCCESS;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(done);

	reset_scb = NULL;

	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;

	scmd_printk(KERN_INFO, cmd,
		    "Attempting to queue a TARGET RESET message:");

	/* Log the CDB of the command that triggered recovery. */
	printk("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printk(" 0x%x", cmd->cmnd[cdb_byte]);
	printk("\n");

	/*
	 * Determine if we currently own this command.
	 */
	dev = scsi_transport_device_data(cmd->device);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
		return SUCCESS;
	}

	/*
	 * Generate us a new SCB
	 */
	reset_scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX);
	if (!reset_scb) {
		scmd_printk(KERN_INFO, cmd, "No SCB available\n");
		return FAILED;
	}

	/* Fill in a zero-transfer-length SCB carrying a LUN reset TMF. */
	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
				    cmd->device->id, &tstate);
	reset_scb->io_ctx = cmd;
	reset_scb->platform_data->dev = dev;
	reset_scb->sg_count = 0;
	ahd_set_residual(reset_scb, 0);
	ahd_set_sense_residual(reset_scb, 0);
	reset_scb->platform_data->xfer_len = 0;
	reset_scb->hscb->control = 0;
	reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd);
	reset_scb->hscb->lun = cmd->device->lun;
	reset_scb->hscb->cdb_len = 0;
	reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET;
	reset_scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		/* Packetized target: the TMF travels in the SIU. */
		reset_scb->flags |= SCB_PACKETIZED;
	} else {
		/* Legacy target: deliver the reset as a message. */
		reset_scb->hscb->control |= MK_MESSAGE;
	}
	dev->openings--;
	dev->active++;
	dev->commands_issued++;

	ahd_lock(ahd, &flags);

	LIST_INSERT_HEAD(&ahd->pending_scbs, reset_scb, pending_links);
	ahd_queue_scb(ahd, reset_scb);

	/* Completion handler signals this when the reset SCB finishes. */
	ahd->platform_data->eh_done = &done;
	ahd_unlock(ahd, &flags);

	printk("%s: Device reset code sleeping\n", ahd_name(ahd));
	if (!wait_for_completion_timeout(&done, 5 * HZ)) {
		/* Timed out: detach eh_done under the lock so a late
		 * completion cannot signal a stale stack variable. */
		ahd_lock(ahd, &flags);
		ahd->platform_data->eh_done = NULL;
		ahd_unlock(ahd, &flags);
		printk("%s: Device reset timer expired (active %d)\n",
		       ahd_name(ahd), dev->active);
		retval = FAILED;
	}
	printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);

	return (retval);
}
862*4882a593Smuzhiyun
863*4882a593Smuzhiyun /*
864*4882a593Smuzhiyun * Reset the SCSI bus.
865*4882a593Smuzhiyun */
866*4882a593Smuzhiyun static int
ahd_linux_bus_reset(struct scsi_cmnd * cmd)867*4882a593Smuzhiyun ahd_linux_bus_reset(struct scsi_cmnd *cmd)
868*4882a593Smuzhiyun {
869*4882a593Smuzhiyun struct ahd_softc *ahd;
870*4882a593Smuzhiyun int found;
871*4882a593Smuzhiyun unsigned long flags;
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
874*4882a593Smuzhiyun #ifdef AHD_DEBUG
875*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
876*4882a593Smuzhiyun printk("%s: Bus reset called for cmd %p\n",
877*4882a593Smuzhiyun ahd_name(ahd), cmd);
878*4882a593Smuzhiyun #endif
879*4882a593Smuzhiyun ahd_lock(ahd, &flags);
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
882*4882a593Smuzhiyun /*initiate reset*/TRUE);
883*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun if (bootverbose)
886*4882a593Smuzhiyun printk("%s: SCSI bus reset delivered. "
887*4882a593Smuzhiyun "%d SCBs aborted.\n", ahd_name(ahd), found);
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun return (SUCCESS);
890*4882a593Smuzhiyun }
891*4882a593Smuzhiyun
/*
 * Host template shared by all aic79xx adapters.  Per-instance values
 * (name, can_queue, this_id, ...) are overridden in
 * ahd_linux_register_host() before the host is added.
 */
struct scsi_host_template aic79xx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic79xx",
	.proc_name		= "aic79xx",
	.show_info		= ahd_linux_show_info,
	.write_info		= ahd_proc_write_seeprom,
	.info			= ahd_linux_info,
	.queuecommand		= ahd_linux_queue,
	.eh_abort_handler	= ahd_linux_abort,
	.eh_device_reset_handler = ahd_linux_dev_reset,
	.eh_bus_reset_handler	= ahd_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahd_linux_biosparam,
#endif
	.can_queue		= AHD_MAX_QUEUE,
	.this_id		= -1,
	.max_sectors		= 8192,
	.cmd_per_lun		= 2,
	.slave_alloc		= ahd_linux_slave_alloc,
	.slave_configure	= ahd_linux_slave_configure,
	.target_alloc		= ahd_linux_target_alloc,
	.target_destroy		= ahd_linux_target_destroy,
};
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun /******************************** Bus DMA *************************************/
917*4882a593Smuzhiyun int
ahd_dma_tag_create(struct ahd_softc * ahd,bus_dma_tag_t parent,bus_size_t alignment,bus_size_t boundary,dma_addr_t lowaddr,dma_addr_t highaddr,bus_dma_filter_t * filter,void * filterarg,bus_size_t maxsize,int nsegments,bus_size_t maxsegsz,int flags,bus_dma_tag_t * ret_tag)918*4882a593Smuzhiyun ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
919*4882a593Smuzhiyun bus_size_t alignment, bus_size_t boundary,
920*4882a593Smuzhiyun dma_addr_t lowaddr, dma_addr_t highaddr,
921*4882a593Smuzhiyun bus_dma_filter_t *filter, void *filterarg,
922*4882a593Smuzhiyun bus_size_t maxsize, int nsegments,
923*4882a593Smuzhiyun bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
924*4882a593Smuzhiyun {
925*4882a593Smuzhiyun bus_dma_tag_t dmat;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
928*4882a593Smuzhiyun if (dmat == NULL)
929*4882a593Smuzhiyun return (ENOMEM);
930*4882a593Smuzhiyun
931*4882a593Smuzhiyun /*
932*4882a593Smuzhiyun * Linux is very simplistic about DMA memory. For now don't
933*4882a593Smuzhiyun * maintain all specification information. Once Linux supplies
934*4882a593Smuzhiyun * better facilities for doing these operations, or the
935*4882a593Smuzhiyun * needs of this particular driver change, we might need to do
936*4882a593Smuzhiyun * more here.
937*4882a593Smuzhiyun */
938*4882a593Smuzhiyun dmat->alignment = alignment;
939*4882a593Smuzhiyun dmat->boundary = boundary;
940*4882a593Smuzhiyun dmat->maxsize = maxsize;
941*4882a593Smuzhiyun *ret_tag = dmat;
942*4882a593Smuzhiyun return (0);
943*4882a593Smuzhiyun }
944*4882a593Smuzhiyun
/* Release a DMA tag allocated by ahd_dma_tag_create(). */
void
ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
{
	kfree(dmat);
}
950*4882a593Smuzhiyun
951*4882a593Smuzhiyun int
ahd_dmamem_alloc(struct ahd_softc * ahd,bus_dma_tag_t dmat,void ** vaddr,int flags,bus_dmamap_t * mapp)952*4882a593Smuzhiyun ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
953*4882a593Smuzhiyun int flags, bus_dmamap_t *mapp)
954*4882a593Smuzhiyun {
955*4882a593Smuzhiyun *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
956*4882a593Smuzhiyun GFP_ATOMIC);
957*4882a593Smuzhiyun if (*vaddr == NULL)
958*4882a593Smuzhiyun return (ENOMEM);
959*4882a593Smuzhiyun return(0);
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun
/* Free a coherent buffer from ahd_dmamem_alloc(); "map" is the bus
 * address that dma_alloc_coherent() handed out. */
void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
}
968*4882a593Smuzhiyun
969*4882a593Smuzhiyun int
ahd_dmamap_load(struct ahd_softc * ahd,bus_dma_tag_t dmat,bus_dmamap_t map,void * buf,bus_size_t buflen,bus_dmamap_callback_t * cb,void * cb_arg,int flags)970*4882a593Smuzhiyun ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
971*4882a593Smuzhiyun void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
972*4882a593Smuzhiyun void *cb_arg, int flags)
973*4882a593Smuzhiyun {
974*4882a593Smuzhiyun /*
975*4882a593Smuzhiyun * Assume for now that this will only be used during
976*4882a593Smuzhiyun * initialization and not for per-transaction buffer mapping.
977*4882a593Smuzhiyun */
978*4882a593Smuzhiyun bus_dma_segment_t stack_sg;
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun stack_sg.ds_addr = map;
981*4882a593Smuzhiyun stack_sg.ds_len = dmat->maxsize;
982*4882a593Smuzhiyun cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
983*4882a593Smuzhiyun return (0);
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun
/* Maps are bare bus addresses on Linux; there is nothing to release. */
void
ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
}
990*4882a593Smuzhiyun
/* Unloading is a no-op on Linux (see ahd_dmamap_load); always succeeds. */
int
ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun /********************* Platform Dependent Functions ***************************/
/*
 * Boot-option callback: store one I/O-cell tuning byte (index selects
 * the field, e.g. slewrate/precomp/amplitude) for controller
 * "instance".  Out-of-range instances are silently ignored.
 */
static void
ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
{

	if ((instance >= 0)
	 && (instance < ARRAY_SIZE(aic79xx_iocell_info))) {
		uint8_t *iocell_info;

		/*
		 * NOTE(review): the 0xFFFF mask is wider than the uint8_t
		 * destination; the store truncates to the low 8 bits
		 * regardless — presumably only byte values are expected.
		 */
		iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
		iocell_info[index] = value & 0xFFFF;
		if (bootverbose)
			printk("iocell[%d:%ld] = %d\n", instance, index, value);
	}
}
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun static void
ahd_linux_setup_tag_info_global(char * p)1015*4882a593Smuzhiyun ahd_linux_setup_tag_info_global(char *p)
1016*4882a593Smuzhiyun {
1017*4882a593Smuzhiyun int tags, i, j;
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyun tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
1020*4882a593Smuzhiyun printk("Setting Global Tags= %d\n", tags);
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) {
1023*4882a593Smuzhiyun for (j = 0; j < AHD_NUM_TARGETS; j++) {
1024*4882a593Smuzhiyun aic79xx_tag_info[i].tag_commands[j] = tags;
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun }
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun static void
ahd_linux_setup_tag_info(u_long arg,int instance,int targ,int32_t value)1030*4882a593Smuzhiyun ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun if ((instance >= 0) && (targ >= 0)
1034*4882a593Smuzhiyun && (instance < ARRAY_SIZE(aic79xx_tag_info))
1035*4882a593Smuzhiyun && (targ < AHD_NUM_TARGETS)) {
1036*4882a593Smuzhiyun aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
1037*4882a593Smuzhiyun if (bootverbose)
1038*4882a593Smuzhiyun printk("tag_info[%d:%d] = %d\n", instance, targ, value);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun }
1041*4882a593Smuzhiyun
/*
 * Parse a brace-structured boot option of the form
 *   name:{val,val,{val,val},...}
 * where the outer braces select the adapter instance and (at depth > 1)
 * an inner brace group selects per-target values.  For every scalar the
 * supplied callback is invoked with (callback_arg, instance, targ,
 * value); an index of -1 means "not selected".  Returns a pointer just
 * past the consumed portion of opt_arg.
 */
static char *
ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
		       void (*callback)(u_long, int, int, int32_t),
		       u_long callback_arg)
{
	char *tok_end;
	char *tok_end2;
	int i;
	int instance;
	int targ;
	int done;
	char tok_list[] = {'.', ',', '{', '}', '\0'};

	/* All options use a ':' name/arg separator */
	if (*opt_arg != ':')
		return (opt_arg);
	opt_arg++;
	instance = -1;
	targ = -1;
	done = FALSE;
	/*
	 * Restore separator that may be in
	 * the middle of our option argument.
	 */
	tok_end = strchr(opt_arg, '\0');
	if (tok_end < end)
		*tok_end = ',';
	while (!done) {
		switch (*opt_arg) {
		case '{':
			/* Opening brace: enter instance scope, then
			 * (if depth allows) target scope. */
			if (instance == -1) {
				instance = 0;
			} else {
				if (depth > 1) {
					if (targ == -1)
						targ = 0;
				} else {
					printk("Malformed Option %s\n",
					       opt_name);
					done = TRUE;
				}
			}
			opt_arg++;
			break;
		case '}':
			/* Closing brace: leave the innermost open scope. */
			if (targ != -1)
				targ = -1;
			else if (instance != -1)
				instance = -1;
			opt_arg++;
			break;
		case ',':
		case '.':
			/* Separator: advance to the next target or
			 * instance; outside any brace it ends parsing. */
			if (instance == -1)
				done = TRUE;
			else if (targ >= 0)
				targ++;
			else if (instance >= 0)
				instance++;
			opt_arg++;
			break;
		case '\0':
			done = TRUE;
			break;
		default:
			/* Scalar token: find its end at the nearest
			 * delimiter and hand the value to the callback. */
			tok_end = end;
			for (i = 0; tok_list[i]; i++) {
				tok_end2 = strchr(opt_arg, tok_list[i]);
				if ((tok_end2) && (tok_end2 < tok_end))
					tok_end = tok_end2;
			}
			callback(callback_arg, instance, targ,
				 simple_strtol(opt_arg, NULL, 0));
			opt_arg = tok_end;
			break;
		}
	}
	return (opt_arg);
}
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun /*
1123*4882a593Smuzhiyun * Handle Linux boot parameters. This routine allows for assigning a value
1124*4882a593Smuzhiyun * to a parameter with a ':' between the parameter and the value.
1125*4882a593Smuzhiyun * ie. aic79xx=stpwlev:1,extended
1126*4882a593Smuzhiyun */
/*
 * Handle Linux boot parameters. This routine allows for assigning a value
 * to a parameter with a ':' between the parameter and the value.
 * ie. aic79xx=stpwlev:1,extended
 *
 * Options are comma/period separated.  Simple flags either toggle
 * (XOR with ~0) or take a ':'-suffixed numeric value; the brace-valued
 * options (tag_info, slewrate, precomp, amplitude) are delegated to
 * ahd_parse_brace_option().  Always returns 1 (option consumed).
 */
static int
aic79xx_setup(char *s)
{
	int	i, n;
	char   *p;
	char   *end;

	/* Table of recognized option names; entries with a NULL flag are
	 * handled by the explicit strncmp branches below. */
	static const struct {
		const char *name;
		uint32_t *flag;
	} options[] = {
		{ "extended", &aic79xx_extended },
		{ "no_reset", &aic79xx_no_reset },
		{ "verbose", &aic79xx_verbose },
		{ "allow_memio", &aic79xx_allow_memio},
#ifdef AHD_DEBUG
		{ "debug", &ahd_debug },
#endif
		{ "periodic_otag", &aic79xx_periodic_otag },
		{ "pci_parity", &aic79xx_pci_parity },
		{ "seltime", &aic79xx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL},
		{ "slewrate", NULL },
		{ "precomp", NULL },
		{ "amplitude", NULL },
		{ "slowcrc", &aic79xx_slowcrc },
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
	 * will never be 0 in this case.
	 */
	n = 0;

	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		/* Match the token against the option table by prefix. */
		for (i = 0; i < ARRAY_SIZE(options); i++) {

			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == ARRAY_SIZE(options))
			continue;

		/* Brace-valued options consume the rest of the string
		 * themselves and hand back the new scan position. */
		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahd_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			s = ahd_parse_brace_option("tag_info", p + n, end,
			    2, ahd_linux_setup_tag_info, 0);
		} else if (strncmp(p, "slewrate", n) == 0) {
			s = ahd_parse_brace_option("slewrate",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_SLEWRATE_INDEX);
		} else if (strncmp(p, "precomp", n) == 0) {
			s = ahd_parse_brace_option("precomp",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_PRECOMP_INDEX);
		} else if (strncmp(p, "amplitude", n) == 0) {
			s = ahd_parse_brace_option("amplitude",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_AMPLITUDE_INDEX);
		} else if (p[n] == ':') {
			/* "name:value" — parse and assign directly. */
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (!strncmp(p, "verbose", n)) {
			*(options[i].flag) = 1;
		} else {
			/* Bare flag name toggles every bit. */
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}
1203*4882a593Smuzhiyun
__setup("aic79xx=", aic79xx_setup);

/* Driver verbosity level; raised via the "verbose" boot option above. */
uint32_t aic79xx_verbose;
1207*4882a593Smuzhiyun
/*
 * Allocate and register a Scsi_Host for an initialized controller:
 * fill in the host limits from the softc, name the unit, initialize
 * the bus, enable interrupts, add the host to the midlayer and scan
 * it.  Returns 0 on success, a positive ENOMEM or the negative
 * scsi_add_host() error on failure.
 */
int
ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template)
{
	char buf[80];
	struct Scsi_Host *host;
	char *new_name;
	u_long s;
	int retval;

	template->name = ahd->description;
	host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
	if (host == NULL)
		return (ENOMEM);

	/* hostdata holds a single pointer back to the softc. */
	*((struct ahd_softc **)host->hostdata) = ahd;
	ahd->platform_data->host = host;
	host->can_queue = AHD_MAX_QUEUE;
	host->cmd_per_lun = 2;
	host->sg_tablesize = AHD_NSEG;
	host->this_id = ahd->our_id;
	host->irq = ahd->platform_data->irq;
	host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
	host->max_lun = AHD_NUM_LUNS;
	host->max_channel = 0;
	host->sg_tablesize = AHD_NSEG;
	ahd_lock(ahd, &s);
	ahd_set_unit(ahd, ahd_linux_unit++);
	ahd_unlock(ahd, &s);
	/* Rename the softc "scsi<N>" after the midlayer host number;
	 * on allocation failure the old name is simply kept. */
	sprintf(buf, "scsi%d", host->host_no);
	new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
	if (new_name != NULL) {
		strcpy(new_name, buf);
		ahd_set_name(ahd, new_name);
	}
	host->unique_id = ahd->unit;
	/* Bring the bus to a known state before accepting commands. */
	ahd_linux_initialize_scsi_bus(ahd);
	ahd_intr_enable(ahd, TRUE);

	host->transportt = ahd_linux_transport_template;

	retval = scsi_add_host(host, &ahd->dev_softc->dev);
	if (retval) {
		printk(KERN_WARNING "aic79xx: scsi_add_host failed\n");
		scsi_host_put(host);
		return retval;
	}

	scsi_scan_host(host);
	return 0;
}
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun /*
1260*4882a593Smuzhiyun * Place the SCSI bus into a known state by either resetting it,
1261*4882a593Smuzhiyun * or forcing transfer negotiations on the next command to any
1262*4882a593Smuzhiyun * target.
1263*4882a593Smuzhiyun */
/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 */
static void
ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
{
	u_int target_id;
	u_int numtarg;
	unsigned long s;

	target_id = 0;
	numtarg = 0;

	/* The "no_reset" boot option vetoes the initial bus reset. */
	if (aic79xx_no_reset != 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	/* Either reset the bus, or mark every possible target for
	 * forced renegotiation below. */
	if ((ahd->flags & AHD_RESET_BUS_A) != 0)
		ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
	else
		numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;

	ahd_lock(ahd, &s);

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; target_id < numtarg; target_id++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    target_id, &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
				    CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
		ahd_update_neg_request(ahd, &devinfo, tstate,
				       tinfo, AHD_NEG_ALWAYS);
	}
	ahd_unlock(ahd, &s);
	/* Give the bus some time to recover */
	if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
		ahd_freeze_simq(ahd);
		msleep(AIC79XX_RESET_DELAY);
		ahd_release_simq(ahd);
	}
}
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun int
ahd_platform_alloc(struct ahd_softc * ahd,void * platform_arg)1310*4882a593Smuzhiyun ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun ahd->platform_data =
1313*4882a593Smuzhiyun kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
1314*4882a593Smuzhiyun if (ahd->platform_data == NULL)
1315*4882a593Smuzhiyun return (ENOMEM);
1316*4882a593Smuzhiyun ahd->platform_data->irq = AHD_LINUX_NOIRQ;
1317*4882a593Smuzhiyun ahd_lockinit(ahd);
1318*4882a593Smuzhiyun ahd->seltime = (aic79xx_seltime & 0x3) << 4;
1319*4882a593Smuzhiyun return (0);
1320*4882a593Smuzhiyun }
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun void
ahd_platform_free(struct ahd_softc * ahd)1323*4882a593Smuzhiyun ahd_platform_free(struct ahd_softc *ahd)
1324*4882a593Smuzhiyun {
1325*4882a593Smuzhiyun struct scsi_target *starget;
1326*4882a593Smuzhiyun int i;
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun if (ahd->platform_data != NULL) {
1329*4882a593Smuzhiyun /* destroy all of the device and target objects */
1330*4882a593Smuzhiyun for (i = 0; i < AHD_NUM_TARGETS; i++) {
1331*4882a593Smuzhiyun starget = ahd->platform_data->starget[i];
1332*4882a593Smuzhiyun if (starget != NULL) {
1333*4882a593Smuzhiyun ahd->platform_data->starget[i] = NULL;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
1338*4882a593Smuzhiyun free_irq(ahd->platform_data->irq, ahd);
1339*4882a593Smuzhiyun if (ahd->tags[0] == BUS_SPACE_PIO
1340*4882a593Smuzhiyun && ahd->bshs[0].ioport != 0)
1341*4882a593Smuzhiyun release_region(ahd->bshs[0].ioport, 256);
1342*4882a593Smuzhiyun if (ahd->tags[1] == BUS_SPACE_PIO
1343*4882a593Smuzhiyun && ahd->bshs[1].ioport != 0)
1344*4882a593Smuzhiyun release_region(ahd->bshs[1].ioport, 256);
1345*4882a593Smuzhiyun if (ahd->tags[0] == BUS_SPACE_MEMIO
1346*4882a593Smuzhiyun && ahd->bshs[0].maddr != NULL) {
1347*4882a593Smuzhiyun iounmap(ahd->bshs[0].maddr);
1348*4882a593Smuzhiyun release_mem_region(ahd->platform_data->mem_busaddr,
1349*4882a593Smuzhiyun 0x1000);
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun if (ahd->platform_data->host)
1352*4882a593Smuzhiyun scsi_host_put(ahd->platform_data->host);
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun kfree(ahd->platform_data);
1355*4882a593Smuzhiyun }
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun void
ahd_platform_init(struct ahd_softc * ahd)1359*4882a593Smuzhiyun ahd_platform_init(struct ahd_softc *ahd)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun /*
1362*4882a593Smuzhiyun * Lookup and commit any modified IO Cell options.
1363*4882a593Smuzhiyun */
1364*4882a593Smuzhiyun if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
1365*4882a593Smuzhiyun const struct ahd_linux_iocell_opts *iocell_opts;
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun iocell_opts = &aic79xx_iocell_info[ahd->unit];
1368*4882a593Smuzhiyun if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
1369*4882a593Smuzhiyun AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
1370*4882a593Smuzhiyun if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
1371*4882a593Smuzhiyun AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
1372*4882a593Smuzhiyun if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
1373*4882a593Smuzhiyun AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
/*
 * Request that all platform-queued transactions matching this SCB's
 * target/channel/lun be completed with CAM_REQUEUE_REQ status.
 * NOTE: ahd_platform_abort_scbs() is a no-op on Linux (returns 0),
 * so today this function has no observable effect.
 */
void
ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
				SCB_GET_CHANNEL(ahd, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}
1386*4882a593Smuzhiyun
/*
 * Apply a new queuing algorithm (none/basic/tagged) to a device and
 * adjust its tag depth accordingly.  Updates the per-device flags,
 * maxtags/openings accounting, and the midlayer queue depth.
 */
void
ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct ahd_linux_device *dev;
	int was_queuing;
	int now_queuing;

	if (sdev == NULL)
		return;

	dev = scsi_transport_device_data(sdev);

	if (dev == NULL)
		return;
	/* Queuing mode in effect before this change. */
	was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHD_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHD_QUEUE_BASIC:
		now_queuing = AHD_DEV_Q_BASIC;
		break;
	case AHD_QUEUE_TAGGED:
		now_queuing = AHD_DEV_Q_TAGGED;
		break;
	}
	/*
	 * If the queuing mode changes while commands are still
	 * outstanding, freeze the device until it drains so the old
	 * and new modes are not intermixed on the wire.
	 */
	if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	/* Clear the queuing-related flags before re-deriving them. */
	dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahd_linux_user_tagdepth(ahd, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHD_QUEUE_TAGGED) {
			dev->flags |= AHD_DEV_Q_TAGGED;
			if (aic79xx_periodic_otag != 0)
				dev->flags |= AHD_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHD_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}

	switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
	case AHD_DEV_Q_BASIC:
	case AHD_DEV_Q_TAGGED:
		scsi_change_queue_depth(sdev,
					dev->openings + dev->active);
		break;
	default:
		/*
		 * We allow the OS to queue 2 untagged transactions to
		 * us at any time even though we can only execute them
		 * serially on the controller/device. This should
		 * remove some latency.
		 */
		scsi_change_queue_depth(sdev, 1);
		break;
	}
}
1470*4882a593Smuzhiyun
/*
 * Abort platform-private queued SCBs matching the given criteria.
 * The Linux OSM keeps no private SCB queues, so there is nothing to
 * abort here; always report zero aborted transactions.
 */
int
ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
			int lun, u_int tag, role_t role, uint32_t status)
{
	return 0;
}
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun static u_int
ahd_linux_user_tagdepth(struct ahd_softc * ahd,struct ahd_devinfo * devinfo)1479*4882a593Smuzhiyun ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
1480*4882a593Smuzhiyun {
1481*4882a593Smuzhiyun static int warned_user;
1482*4882a593Smuzhiyun u_int tags;
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun tags = 0;
1485*4882a593Smuzhiyun if ((ahd->user_discenable & devinfo->target_mask) != 0) {
1486*4882a593Smuzhiyun if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) {
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun if (warned_user == 0) {
1489*4882a593Smuzhiyun printk(KERN_WARNING
1490*4882a593Smuzhiyun "aic79xx: WARNING: Insufficient tag_info instances\n"
1491*4882a593Smuzhiyun "aic79xx: for installed controllers. Using defaults\n"
1492*4882a593Smuzhiyun "aic79xx: Please update the aic79xx_tag_info array in\n"
1493*4882a593Smuzhiyun "aic79xx: the aic79xx_osm.c source file.\n");
1494*4882a593Smuzhiyun warned_user++;
1495*4882a593Smuzhiyun }
1496*4882a593Smuzhiyun tags = AHD_MAX_QUEUE;
1497*4882a593Smuzhiyun } else {
1498*4882a593Smuzhiyun adapter_tag_info_t *tag_info;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun tag_info = &aic79xx_tag_info[ahd->unit];
1501*4882a593Smuzhiyun tags = tag_info->tag_commands[devinfo->target_offset];
1502*4882a593Smuzhiyun if (tags > AHD_MAX_QUEUE)
1503*4882a593Smuzhiyun tags = AHD_MAX_QUEUE;
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun return (tags);
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun /*
1510*4882a593Smuzhiyun * Determines the queue depth for a given device.
1511*4882a593Smuzhiyun */
1512*4882a593Smuzhiyun static void
ahd_linux_device_queue_depth(struct scsi_device * sdev)1513*4882a593Smuzhiyun ahd_linux_device_queue_depth(struct scsi_device *sdev)
1514*4882a593Smuzhiyun {
1515*4882a593Smuzhiyun struct ahd_devinfo devinfo;
1516*4882a593Smuzhiyun u_int tags;
1517*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata);
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo,
1520*4882a593Smuzhiyun ahd->our_id,
1521*4882a593Smuzhiyun sdev->sdev_target->id, sdev->lun,
1522*4882a593Smuzhiyun sdev->sdev_target->channel == 0 ? 'A' : 'B',
1523*4882a593Smuzhiyun ROLE_INITIATOR);
1524*4882a593Smuzhiyun tags = ahd_linux_user_tagdepth(ahd, &devinfo);
1525*4882a593Smuzhiyun if (tags != 0 && sdev->tagged_supported != 0) {
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED);
1528*4882a593Smuzhiyun ahd_send_async(ahd, devinfo.channel, devinfo.target,
1529*4882a593Smuzhiyun devinfo.lun, AC_TRANSFER_NEG);
1530*4882a593Smuzhiyun ahd_print_devinfo(ahd, &devinfo);
1531*4882a593Smuzhiyun printk("Tagged Queuing enabled. Depth %d\n", tags);
1532*4882a593Smuzhiyun } else {
1533*4882a593Smuzhiyun ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
1534*4882a593Smuzhiyun ahd_send_async(ahd, devinfo.channel, devinfo.target,
1535*4882a593Smuzhiyun devinfo.lun, AC_TRANSFER_NEG);
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun
/*
 * Build a hardware SCB for the given midlayer command and queue it to
 * the controller.  Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY when
 * DMA mapping fails or no SCB is currently available.
 */
static int
ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
		      struct scsi_cmnd *cmd)
{
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int col_idx;
	uint16_t mask;
	unsigned long flags;
	int nseg;

	/* Map the command's data buffer for DMA before taking the lock. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	ahd_lock(ahd, &flags);

	/*
	 * Get an scb to use.
	 */
	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
				    cmd->device->id, &tstate);
	/*
	 * Untagged commands and packetized (IU) connections bypass the
	 * collision index; everything else hashes on target/lun.
	 */
	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
	 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		col_idx = AHD_NEVER_COL_IDX;
	} else {
		col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
					    cmd->device->lun);
	}
	if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
		/* Out of SCBs; unwind and ask the midlayer to retry. */
		ahd->flags |= AHD_RESOURCE_SHORTAGE;
		ahd_unlock(ahd, &flags);
		scsi_dma_unmap(cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Cross-link the SCB and the midlayer command. */
	scb->io_ctx = cmd;
	scb->platform_data->dev = dev;
	hscb = scb->hscb;
	cmd->host_scribble = (char *)scb;

	/*
	 * Fill out basics of the HSCB.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahd, cmd);
	hscb->lun = cmd->device->lun;
	scb->hscb->task_management = 0;
	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((ahd->user_discenable & mask) != 0)
		hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
		scb->flags |= SCB_PACKETIZED;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
		/*
		 * Periodically insert an ordered tag to impose some
		 * command ordering on the device.
		 */
		if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
		 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
			hscb->control |= MSG_ORDERED_TASK;
			dev->commands_since_idle_or_otag = 0;
		} else {
			hscb->control |= MSG_SIMPLE_TASK;
		}
	}

	/* Copy the CDB into the hardware SCB. */
	hscb->cdb_len = cmd->cmd_len;
	memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);

	scb->platform_data->xfer_len = 0;
	ahd_set_residual(scb, 0);
	ahd_set_sense_residual(scb, 0);
	scb->sg_count = 0;

	if (nseg > 0) {
		/* Translate the DMA mapping into the SCB's S/G list. */
		void *sg = scb->sg_list;
		struct scatterlist *cur_seg;
		int i;

		scb->platform_data->xfer_len = 0;

		scsi_for_each_sg(cmd, cur_seg, nseg, i) {
			dma_addr_t addr;
			bus_size_t len;

			addr = sg_dma_address(cur_seg);
			len = sg_dma_len(cur_seg);
			scb->platform_data->xfer_len += len;
			/* Last segment gets the end-of-list marker. */
			sg = ahd_sg_setup(ahd, scb, sg, addr, len,
					  i == (nseg - 1));
		}
	}

	/* Account for the command and hand it to the controller. */
	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
	dev->openings--;
	dev->active++;
	dev->commands_issued++;

	if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
		dev->commands_since_idle_or_otag++;
	scb->flags |= SCB_ACTIVE;
	ahd_queue_scb(ahd, scb);

	ahd_unlock(ahd, &flags);

	return 0;
}
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun /*
1655*4882a593Smuzhiyun * SCSI controller interrupt handler.
1656*4882a593Smuzhiyun */
1657*4882a593Smuzhiyun irqreturn_t
ahd_linux_isr(int irq,void * dev_id)1658*4882a593Smuzhiyun ahd_linux_isr(int irq, void *dev_id)
1659*4882a593Smuzhiyun {
1660*4882a593Smuzhiyun struct ahd_softc *ahd;
1661*4882a593Smuzhiyun u_long flags;
1662*4882a593Smuzhiyun int ours;
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun ahd = (struct ahd_softc *) dev_id;
1665*4882a593Smuzhiyun ahd_lock(ahd, &flags);
1666*4882a593Smuzhiyun ours = ahd_intr(ahd);
1667*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
1668*4882a593Smuzhiyun return IRQ_RETVAL(ours);
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun
/*
 * Report an asynchronous controller event to the OS: a completed
 * transfer negotiation, a sent bus device reset, or a bus reset.
 */
void
ahd_send_async(struct ahd_softc *ahd, char channel,
	       u_int target, u_int lun, ac_code code)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		struct scsi_target *starget;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		unsigned int target_ppr_options;

		BUG_ON(target == CAM_TARGET_WILDCARD);

		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending (curr != goal),
		 * unless bootverbose is set.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		starget = ahd->platform_data->starget[target];
		if (starget == NULL)
			break;

		/*
		 * Reconstruct the PPR option mask last published to the
		 * SPI transport class for comparison with current state.
		 */
		target_ppr_options =
			(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
			+ (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
			+ (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0)
			+ (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
			+ (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
			+ (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
			+ (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
			+ (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);

		if (tinfo->curr.period == spi_period(starget)
		 && tinfo->curr.width == spi_width(starget)
		 && tinfo->curr.offset == spi_offset(starget)
		 && tinfo->curr.ppr_options == target_ppr_options)
			if (bootverbose == 0)
				break;

		/* Publish the new agreement to the SPI transport class. */
		spi_period(starget) = tinfo->curr.period;
		spi_width(starget) = tinfo->curr.width;
		spi_offset(starget) = tinfo->curr.offset;
		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
		spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
		spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
		spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
		spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
		spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
		spi_display_xfer_agreement(starget);
		break;
	}
	case AC_SENT_BDR:
	{
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahd->platform_data->host,
					 channel - 'A', target);
		break;
	}
	case AC_BUS_RESET:
		if (ahd->platform_data->host != NULL) {
			scsi_report_bus_reset(ahd->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
}
1755*4882a593Smuzhiyun
1756*4882a593Smuzhiyun /*
1757*4882a593Smuzhiyun * Calls the higher level scsi done function and frees the scb.
1758*4882a593Smuzhiyun */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsi_cmnd *cmd;
	struct ahd_linux_device *dev;

	/*
	 * Completing an SCB that is not marked active indicates driver
	 * state corruption; halt rather than risk further damage.
	 */
	if ((scb->flags & SCB_ACTIVE) == 0) {
		printk("SCB %d done'd twice\n", SCB_GET_TAG(scb));
		ahd_dump_card_state(ahd);
		panic("Stopping for safety");
	}
	LIST_REMOVE(scb, pending_links);
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	/* Return this command's slot to the device's opening count. */
	dev->active--;
	dev->openings++;
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahd_linux_unmap_scb(ahd, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
	 */
	cmd->sense_buffer[0] = 0;
	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
#ifdef AHD_REPORT_UNDERFLOWS
		uint32_t amount_xferred;

		amount_xferred =
		    ahd_get_transfer_length(scb) - ahd_get_residual(scb);
#endif
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MISC) != 0) {
				ahd_print_path(ahd, scb);
				printk("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHD_REPORT_UNDERFLOWS
		/*
		 * This code is disabled by default as some
		 * clients of the SCSI system do not properly
		 * initialize the underflow parameter.  This
		 * results in spurious termination of commands
		 * that complete as expected (e.g. underflow is
		 * allowed as command can return variable amounts
		 * of data.
		 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahd_print_path(ahd, scb);
			printk("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printk(" 0x%x", scb->io_ctx->cmnd[i]);
			printk("\n");
			ahd_print_path(ahd, scb);
			printk("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
				ahd_get_residual(scb),
				ahd_get_transfer_length(scb));
			ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			/* Still in-progress with no error: clean completion. */
			ahd_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
		ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
	}

	/*
	 * Count clean completions observed at minimum openings so the
	 * queue-full throttle below can slowly restore queue depth.
	 */
	if (dev->openings == 1
	 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
	 && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When the queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	 && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printk("Recovery SCB completes\n");
		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);

		/* Wake any error-recovery thread waiting on this SCB. */
		if (ahd->platform_data->eh_done)
			complete(ahd->platform_data->eh_done);
	}

	ahd_free_scb(ahd, scb);
	ahd_linux_queue_cmd_complete(ahd, cmd);
}
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun static void
ahd_linux_handle_scsi_status(struct ahd_softc * ahd,struct scsi_device * sdev,struct scb * scb)1869*4882a593Smuzhiyun ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
1870*4882a593Smuzhiyun struct scsi_device *sdev, struct scb *scb)
1871*4882a593Smuzhiyun {
1872*4882a593Smuzhiyun struct ahd_devinfo devinfo;
1873*4882a593Smuzhiyun struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo,
1876*4882a593Smuzhiyun ahd->our_id,
1877*4882a593Smuzhiyun sdev->sdev_target->id, sdev->lun,
1878*4882a593Smuzhiyun sdev->sdev_target->channel == 0 ? 'A' : 'B',
1879*4882a593Smuzhiyun ROLE_INITIATOR);
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun /*
1882*4882a593Smuzhiyun * We don't currently trust the mid-layer to
1883*4882a593Smuzhiyun * properly deal with queue full or busy. So,
1884*4882a593Smuzhiyun * when one occurs, we tell the mid-layer to
1885*4882a593Smuzhiyun * unconditionally requeue the command to us
1886*4882a593Smuzhiyun * so that we can retry it ourselves. We also
1887*4882a593Smuzhiyun * implement our own throttling mechanism so
1888*4882a593Smuzhiyun * we don't clobber the device with too many
1889*4882a593Smuzhiyun * commands.
1890*4882a593Smuzhiyun */
1891*4882a593Smuzhiyun switch (ahd_get_scsi_status(scb)) {
1892*4882a593Smuzhiyun default:
1893*4882a593Smuzhiyun break;
1894*4882a593Smuzhiyun case SCSI_STATUS_CHECK_COND:
1895*4882a593Smuzhiyun case SCSI_STATUS_CMD_TERMINATED:
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun struct scsi_cmnd *cmd;
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun /*
1900*4882a593Smuzhiyun * Copy sense information to the OS's cmd
1901*4882a593Smuzhiyun * structure if it is available.
1902*4882a593Smuzhiyun */
1903*4882a593Smuzhiyun cmd = scb->io_ctx;
1904*4882a593Smuzhiyun if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
1905*4882a593Smuzhiyun struct scsi_status_iu_header *siu;
1906*4882a593Smuzhiyun u_int sense_size;
1907*4882a593Smuzhiyun u_int sense_offset;
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun if (scb->flags & SCB_SENSE) {
1910*4882a593Smuzhiyun sense_size = min(sizeof(struct scsi_sense_data)
1911*4882a593Smuzhiyun - ahd_get_sense_residual(scb),
1912*4882a593Smuzhiyun (u_long)SCSI_SENSE_BUFFERSIZE);
1913*4882a593Smuzhiyun sense_offset = 0;
1914*4882a593Smuzhiyun } else {
1915*4882a593Smuzhiyun /*
1916*4882a593Smuzhiyun * Copy only the sense data into the provided
1917*4882a593Smuzhiyun * buffer.
1918*4882a593Smuzhiyun */
1919*4882a593Smuzhiyun siu = (struct scsi_status_iu_header *)
1920*4882a593Smuzhiyun scb->sense_data;
1921*4882a593Smuzhiyun sense_size = min_t(size_t,
1922*4882a593Smuzhiyun scsi_4btoul(siu->sense_length),
1923*4882a593Smuzhiyun SCSI_SENSE_BUFFERSIZE);
1924*4882a593Smuzhiyun sense_offset = SIU_SENSE_OFFSET(siu);
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1928*4882a593Smuzhiyun memcpy(cmd->sense_buffer,
1929*4882a593Smuzhiyun ahd_get_sense_buf(ahd, scb)
1930*4882a593Smuzhiyun + sense_offset, sense_size);
1931*4882a593Smuzhiyun cmd->result |= (DRIVER_SENSE << 24);
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun #ifdef AHD_DEBUG
1934*4882a593Smuzhiyun if (ahd_debug & AHD_SHOW_SENSE) {
1935*4882a593Smuzhiyun int i;
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun printk("Copied %d bytes of sense data at %d:",
1938*4882a593Smuzhiyun sense_size, sense_offset);
1939*4882a593Smuzhiyun for (i = 0; i < sense_size; i++) {
1940*4882a593Smuzhiyun if ((i & 0xF) == 0)
1941*4882a593Smuzhiyun printk("\n");
1942*4882a593Smuzhiyun printk("0x%x ", cmd->sense_buffer[i]);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun printk("\n");
1945*4882a593Smuzhiyun }
1946*4882a593Smuzhiyun #endif
1947*4882a593Smuzhiyun }
1948*4882a593Smuzhiyun break;
1949*4882a593Smuzhiyun }
1950*4882a593Smuzhiyun case SCSI_STATUS_QUEUE_FULL:
1951*4882a593Smuzhiyun /*
1952*4882a593Smuzhiyun * By the time the core driver has returned this
1953*4882a593Smuzhiyun * command, all other commands that were queued
1954*4882a593Smuzhiyun * to us but not the device have been returned.
1955*4882a593Smuzhiyun * This ensures that dev->active is equal to
1956*4882a593Smuzhiyun * the number of commands actually queued to
1957*4882a593Smuzhiyun * the device.
1958*4882a593Smuzhiyun */
1959*4882a593Smuzhiyun dev->tag_success_count = 0;
1960*4882a593Smuzhiyun if (dev->active != 0) {
1961*4882a593Smuzhiyun /*
1962*4882a593Smuzhiyun * Drop our opening count to the number
1963*4882a593Smuzhiyun * of commands currently outstanding.
1964*4882a593Smuzhiyun */
1965*4882a593Smuzhiyun dev->openings = 0;
1966*4882a593Smuzhiyun #ifdef AHD_DEBUG
1967*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
1968*4882a593Smuzhiyun ahd_print_path(ahd, scb);
1969*4882a593Smuzhiyun printk("Dropping tag count to %d\n",
1970*4882a593Smuzhiyun dev->active);
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun #endif
1973*4882a593Smuzhiyun if (dev->active == dev->tags_on_last_queuefull) {
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun dev->last_queuefull_same_count++;
1976*4882a593Smuzhiyun /*
1977*4882a593Smuzhiyun * If we repeatedly see a queue full
1978*4882a593Smuzhiyun * at the same queue depth, this
1979*4882a593Smuzhiyun * device has a fixed number of tag
1980*4882a593Smuzhiyun * slots. Lock in this tag depth
1981*4882a593Smuzhiyun * so we stop seeing queue fulls from
1982*4882a593Smuzhiyun * this device.
1983*4882a593Smuzhiyun */
1984*4882a593Smuzhiyun if (dev->last_queuefull_same_count
1985*4882a593Smuzhiyun == AHD_LOCK_TAGS_COUNT) {
1986*4882a593Smuzhiyun dev->maxtags = dev->active;
1987*4882a593Smuzhiyun ahd_print_path(ahd, scb);
1988*4882a593Smuzhiyun printk("Locking max tag count at %d\n",
1989*4882a593Smuzhiyun dev->active);
1990*4882a593Smuzhiyun }
1991*4882a593Smuzhiyun } else {
1992*4882a593Smuzhiyun dev->tags_on_last_queuefull = dev->active;
1993*4882a593Smuzhiyun dev->last_queuefull_same_count = 0;
1994*4882a593Smuzhiyun }
1995*4882a593Smuzhiyun ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
1996*4882a593Smuzhiyun ahd_set_scsi_status(scb, SCSI_STATUS_OK);
1997*4882a593Smuzhiyun ahd_platform_set_tags(ahd, sdev, &devinfo,
1998*4882a593Smuzhiyun (dev->flags & AHD_DEV_Q_BASIC)
1999*4882a593Smuzhiyun ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
2000*4882a593Smuzhiyun break;
2001*4882a593Smuzhiyun }
2002*4882a593Smuzhiyun /*
2003*4882a593Smuzhiyun * Drop down to a single opening, and treat this
2004*4882a593Smuzhiyun * as if the target returned BUSY SCSI status.
2005*4882a593Smuzhiyun */
2006*4882a593Smuzhiyun dev->openings = 1;
2007*4882a593Smuzhiyun ahd_platform_set_tags(ahd, sdev, &devinfo,
2008*4882a593Smuzhiyun (dev->flags & AHD_DEV_Q_BASIC)
2009*4882a593Smuzhiyun ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
2010*4882a593Smuzhiyun ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
2011*4882a593Smuzhiyun }
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun static void
ahd_linux_queue_cmd_complete(struct ahd_softc * ahd,struct scsi_cmnd * cmd)2015*4882a593Smuzhiyun ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
2016*4882a593Smuzhiyun {
2017*4882a593Smuzhiyun int status;
2018*4882a593Smuzhiyun int new_status = DID_OK;
2019*4882a593Smuzhiyun int do_fallback = 0;
2020*4882a593Smuzhiyun int scsi_status;
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun /*
2023*4882a593Smuzhiyun * Map CAM error codes into Linux Error codes. We
2024*4882a593Smuzhiyun * avoid the conversion so that the DV code has the
2025*4882a593Smuzhiyun * full error information available when making
2026*4882a593Smuzhiyun * state change decisions.
2027*4882a593Smuzhiyun */
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun status = ahd_cmd_get_transaction_status(cmd);
2030*4882a593Smuzhiyun switch (status) {
2031*4882a593Smuzhiyun case CAM_REQ_INPROG:
2032*4882a593Smuzhiyun case CAM_REQ_CMP:
2033*4882a593Smuzhiyun new_status = DID_OK;
2034*4882a593Smuzhiyun break;
2035*4882a593Smuzhiyun case CAM_AUTOSENSE_FAIL:
2036*4882a593Smuzhiyun new_status = DID_ERROR;
2037*4882a593Smuzhiyun fallthrough;
2038*4882a593Smuzhiyun case CAM_SCSI_STATUS_ERROR:
2039*4882a593Smuzhiyun scsi_status = ahd_cmd_get_scsi_status(cmd);
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun switch(scsi_status) {
2042*4882a593Smuzhiyun case SCSI_STATUS_CMD_TERMINATED:
2043*4882a593Smuzhiyun case SCSI_STATUS_CHECK_COND:
2044*4882a593Smuzhiyun if ((cmd->result >> 24) != DRIVER_SENSE) {
2045*4882a593Smuzhiyun do_fallback = 1;
2046*4882a593Smuzhiyun } else {
2047*4882a593Smuzhiyun struct scsi_sense_data *sense;
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun sense = (struct scsi_sense_data *)
2050*4882a593Smuzhiyun cmd->sense_buffer;
2051*4882a593Smuzhiyun if (sense->extra_len >= 5 &&
2052*4882a593Smuzhiyun (sense->add_sense_code == 0x47
2053*4882a593Smuzhiyun || sense->add_sense_code == 0x48))
2054*4882a593Smuzhiyun do_fallback = 1;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun break;
2057*4882a593Smuzhiyun default:
2058*4882a593Smuzhiyun break;
2059*4882a593Smuzhiyun }
2060*4882a593Smuzhiyun break;
2061*4882a593Smuzhiyun case CAM_REQ_ABORTED:
2062*4882a593Smuzhiyun new_status = DID_ABORT;
2063*4882a593Smuzhiyun break;
2064*4882a593Smuzhiyun case CAM_BUSY:
2065*4882a593Smuzhiyun new_status = DID_BUS_BUSY;
2066*4882a593Smuzhiyun break;
2067*4882a593Smuzhiyun case CAM_REQ_INVALID:
2068*4882a593Smuzhiyun case CAM_PATH_INVALID:
2069*4882a593Smuzhiyun new_status = DID_BAD_TARGET;
2070*4882a593Smuzhiyun break;
2071*4882a593Smuzhiyun case CAM_SEL_TIMEOUT:
2072*4882a593Smuzhiyun new_status = DID_NO_CONNECT;
2073*4882a593Smuzhiyun break;
2074*4882a593Smuzhiyun case CAM_SCSI_BUS_RESET:
2075*4882a593Smuzhiyun case CAM_BDR_SENT:
2076*4882a593Smuzhiyun new_status = DID_RESET;
2077*4882a593Smuzhiyun break;
2078*4882a593Smuzhiyun case CAM_UNCOR_PARITY:
2079*4882a593Smuzhiyun new_status = DID_PARITY;
2080*4882a593Smuzhiyun do_fallback = 1;
2081*4882a593Smuzhiyun break;
2082*4882a593Smuzhiyun case CAM_CMD_TIMEOUT:
2083*4882a593Smuzhiyun new_status = DID_TIME_OUT;
2084*4882a593Smuzhiyun do_fallback = 1;
2085*4882a593Smuzhiyun break;
2086*4882a593Smuzhiyun case CAM_REQ_CMP_ERR:
2087*4882a593Smuzhiyun case CAM_UNEXP_BUSFREE:
2088*4882a593Smuzhiyun case CAM_DATA_RUN_ERR:
2089*4882a593Smuzhiyun new_status = DID_ERROR;
2090*4882a593Smuzhiyun do_fallback = 1;
2091*4882a593Smuzhiyun break;
2092*4882a593Smuzhiyun case CAM_UA_ABORT:
2093*4882a593Smuzhiyun case CAM_NO_HBA:
2094*4882a593Smuzhiyun case CAM_SEQUENCE_FAIL:
2095*4882a593Smuzhiyun case CAM_CCB_LEN_ERR:
2096*4882a593Smuzhiyun case CAM_PROVIDE_FAIL:
2097*4882a593Smuzhiyun case CAM_REQ_TERMIO:
2098*4882a593Smuzhiyun case CAM_UNREC_HBA_ERROR:
2099*4882a593Smuzhiyun case CAM_REQ_TOO_BIG:
2100*4882a593Smuzhiyun new_status = DID_ERROR;
2101*4882a593Smuzhiyun break;
2102*4882a593Smuzhiyun case CAM_REQUEUE_REQ:
2103*4882a593Smuzhiyun new_status = DID_REQUEUE;
2104*4882a593Smuzhiyun break;
2105*4882a593Smuzhiyun default:
2106*4882a593Smuzhiyun /* We should never get here */
2107*4882a593Smuzhiyun new_status = DID_ERROR;
2108*4882a593Smuzhiyun break;
2109*4882a593Smuzhiyun }
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun if (do_fallback) {
2112*4882a593Smuzhiyun printk("%s: device overrun (status %x) on %d:%d:%d\n",
2113*4882a593Smuzhiyun ahd_name(ahd), status, cmd->device->channel,
2114*4882a593Smuzhiyun cmd->device->id, (u8)cmd->device->lun);
2115*4882a593Smuzhiyun }
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun ahd_cmd_set_transaction_status(cmd, new_status);
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun cmd->scsi_done(cmd);
2120*4882a593Smuzhiyun }
2121*4882a593Smuzhiyun
/*
 * Stop the SCSI midlayer from delivering new commands to this host
 * (wraps scsi_block_requests on our Scsi_Host).
 */
static void
ahd_freeze_simq(struct ahd_softc *ahd)
{
	scsi_block_requests(ahd->platform_data->host);
}
2127*4882a593Smuzhiyun
/*
 * Re-enable command delivery from the SCSI midlayer to this host;
 * pairs with ahd_freeze_simq (wraps scsi_unblock_requests).
 */
static void
ahd_release_simq(struct ahd_softc *ahd)
{
	scsi_unblock_requests(ahd->platform_data->host);
}
2133*4882a593Smuzhiyun
/*
 * Error-recovery entry point: try to abort a single outstanding command.
 * Depending on where the command currently lives (QINFIFO, active on the
 * bus, or disconnected) it is either removed directly or an ABORT
 * message/task-management function is queued to the target.  Takes and
 * releases the controller lock internally and may sleep waiting for the
 * recovery action to complete.  Returns SUCCESS or FAILED per the SCSI
 * EH convention.
 */
static int
ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	struct ahd_linux_device *dev;
	struct scb *pending_scb;
	u_int saved_scbptr;
	u_int active_scbptr;
	u_int last_phase;
	u_int saved_scsiid;
	u_int cdb_byte;
	int retval = SUCCESS;
	int was_paused;
	int paused;
	int wait;
	int disconnected;
	ahd_mode_state saved_modes;
	unsigned long flags;

	pending_scb = NULL;
	paused = FALSE;
	wait = FALSE;
	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;

	scmd_printk(KERN_INFO, cmd,
		    "Attempting to queue an ABORT message:");

	/* Log the CDB of the command being aborted for diagnostics. */
	printk("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printk(" 0x%x", cmd->cmnd[cdb_byte]);
	printk("\n");

	ahd_lock(ahd, &flags);

	/*
	 * First determine if we currently own this command.
	 * Start by searching the device queue. If not found
	 * there, check the pending_scb list. If not found
	 * at all, and the system wanted us to just abort the
	 * command, return success.
	 */
	dev = scsi_transport_device_data(cmd->device);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
		goto done;
	}

	/*
	 * See if we can find a matching cmd in the pending list.
	 */
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		if (pending_scb->io_ctx == cmd)
			break;
	}

	if (pending_scb == NULL) {
		scmd_printk(KERN_INFO, cmd, "Command not found\n");
		goto done;
	}

	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
		/*
		 * We can't queue two recovery actions using the same SCB
		 */
		retval = FAILED;
		goto done;
	}

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back. Also make sure that we
	 * didn't "just" miss an interrupt that would
	 * affect this cmd.
	 */
	was_paused = ahd_is_paused(ahd);
	ahd_pause_and_flushwork(ahd);
	paused = TRUE;

	/* Flushing work may have completed the command already. */
	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
		scmd_printk(KERN_INFO, cmd, "Command already completed\n");
		goto done;
	}

	printk("%s: At time of recovery, card was %spaused\n",
	       ahd_name(ahd), was_paused ? "" : "not ");
	ahd_dump_card_state(ahd);

	disconnected = TRUE;
	/*
	 * If the command is still sitting in the QINFIFO it has never
	 * been sent to the target; removing it there completes the abort
	 * without touching the bus.
	 */
	if (ahd_search_qinfifo(ahd, cmd->device->id,
			       cmd->device->channel + 'A',
			       cmd->device->lun,
			       pending_scb->hscb->tag,
			       ROLE_INITIATOR, CAM_REQ_ABORTED,
			       SEARCH_COMPLETE) > 0) {
		printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
		       ahd_name(ahd), cmd->device->channel,
		       cmd->device->id, (u8)cmd->device->lun);
		goto done;
	}

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	last_phase = ahd_inb(ahd, LASTPHASE);
	saved_scbptr = ahd_get_scbptr(ahd);
	active_scbptr = saved_scbptr;
	/*
	 * If the target has identified itself, the SCB the sequencer is
	 * currently working on tells us whether our command is the one
	 * active on the bus right now.
	 */
	if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
		struct scb *bus_scb;

		bus_scb = ahd_lookup_scb(ahd, active_scbptr);
		if (bus_scb == pending_scb)
			disconnected = FALSE;
	}

	/*
	 * At this point, pending_scb is the scb associated with the
	 * passed in command. That command is currently active on the
	 * bus or is in the disconnected state.
	 */
	/* NOTE(review): saved_scsiid is read from the card but never used below. */
	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	if (last_phase != P_BUSFREE
	    && SCB_GET_TAG(pending_scb) == active_scbptr) {

		/*
		 * We're active on the bus, so assert ATN
		 * and hope that the target responds.
		 */
		pending_scb = ahd_lookup_scb(ahd, active_scbptr);
		pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
		scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
		wait = TRUE;
	} else if (disconnected) {

		/*
		 * Actually re-queue this SCB in an attempt
		 * to select the device before it reconnects.
		 */
		pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
		ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
		pending_scb->hscb->cdb_len = 0;
		pending_scb->hscb->task_attribute = 0;
		pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;

		if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
			/*
			 * Mark the SCB has having an outstanding
			 * task management function. Should the command
			 * complete normally before the task management
			 * function can be sent, the host will be notified
			 * to abort our requeued SCB.
			 */
			ahd_outb(ahd, SCB_TASK_MANAGEMENT,
				 pending_scb->hscb->task_management);
		} else {
			/*
			 * If non-packetized, set the MK_MESSAGE control
			 * bit indicating that we desire to send a message.
			 * We also set the disconnected flag since there is
			 * no guarantee that our SCB control byte matches
			 * the version on the card. We don't want the
			 * sequencer to abort the command thinking an
			 * unsolicited reselection occurred.
			 */
			pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;

			/*
			 * The sequencer will never re-reference the
			 * in-core SCB. To make sure we are notified
			 * during reselection, set the MK_MESSAGE flag in
			 * the card's copy of the SCB.
			 */
			ahd_outb(ahd, SCB_CONTROL,
				 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
		}

		/*
		 * Clear out any entries in the QINFIFO first
		 * so we are the next SCB for this target
		 * to run.
		 */
		ahd_search_qinfifo(ahd, cmd->device->id,
				   cmd->device->channel + 'A', cmd->device->lun,
				   SCB_LIST_NULL, ROLE_INITIATOR,
				   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
		ahd_qinfifo_requeue_tail(ahd, pending_scb);
		ahd_set_scbptr(ahd, saved_scbptr);
		ahd_print_path(ahd, pending_scb);
		printk("Device is disconnected, re-queuing SCB\n");
		wait = TRUE;
	} else {
		scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
		retval = FAILED;
	}


	ahd_restore_modes(ahd, saved_modes);
done:
	if (paused)
		ahd_unpause(ahd);
	if (wait) {
		/*
		 * Wait up to 5 seconds for the interrupt path to signal
		 * that the recovery action completed.
		 */
		DECLARE_COMPLETION_ONSTACK(done);

		ahd->platform_data->eh_done = &done;
		ahd_unlock(ahd, &flags);

		printk("%s: Recovery code sleeping\n", ahd_name(ahd));
		if (!wait_for_completion_timeout(&done, 5 * HZ)) {
			ahd_lock(ahd, &flags);
			ahd->platform_data->eh_done = NULL;
			ahd_unlock(ahd, &flags);
			printk("%s: Timer Expired (active %d)\n",
			       ahd_name(ahd), dev->active);
			retval = FAILED;
		}
		printk("Recovery code awake\n");
	} else
		ahd_unlock(ahd, &flags);

	if (retval != SUCCESS)
		printk("%s: Command abort returning 0x%x\n",
		       ahd_name(ahd), retval);

	return retval;
}
2364*4882a593Smuzhiyun
/*
 * spi transport class callback: set the transfer width goal for the
 * given target.  AHD_TRANS_GOAL updates the negotiation goal rather
 * than the currently active settings.
 */
static void ahd_linux_set_width(struct scsi_target *starget, int width)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_devinfo devinfo;
	unsigned long flags;

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	ahd_lock(ahd, &flags);
	ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
	ahd_unlock(ahd, &flags);
}
2378*4882a593Smuzhiyun
ahd_linux_set_period(struct scsi_target * starget,int period)2379*4882a593Smuzhiyun static void ahd_linux_set_period(struct scsi_target *starget, int period)
2380*4882a593Smuzhiyun {
2381*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2382*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2383*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2384*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2385*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2386*4882a593Smuzhiyun starget->channel + 'A',
2387*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2388*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2389*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options;
2390*4882a593Smuzhiyun unsigned int dt;
2391*4882a593Smuzhiyun unsigned long flags;
2392*4882a593Smuzhiyun unsigned long offset = tinfo->goal.offset;
2393*4882a593Smuzhiyun
2394*4882a593Smuzhiyun #ifdef AHD_DEBUG
2395*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2396*4882a593Smuzhiyun printk("%s: set period to %d\n", ahd_name(ahd), period);
2397*4882a593Smuzhiyun #endif
2398*4882a593Smuzhiyun if (offset == 0)
2399*4882a593Smuzhiyun offset = MAX_OFFSET;
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun if (period < 8)
2402*4882a593Smuzhiyun period = 8;
2403*4882a593Smuzhiyun if (period < 10) {
2404*4882a593Smuzhiyun if (spi_max_width(starget)) {
2405*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_DT_REQ;
2406*4882a593Smuzhiyun if (period == 8)
2407*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_IU_REQ;
2408*4882a593Smuzhiyun } else
2409*4882a593Smuzhiyun period = 10;
2410*4882a593Smuzhiyun }
2411*4882a593Smuzhiyun
2412*4882a593Smuzhiyun dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2413*4882a593Smuzhiyun
2414*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2415*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun /* all PPR requests apart from QAS require wide transfers */
2418*4882a593Smuzhiyun if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
2419*4882a593Smuzhiyun if (spi_width(starget) == 0)
2420*4882a593Smuzhiyun ppr_options &= MSG_EXT_PPR_QAS_REQ;
2421*4882a593Smuzhiyun }
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2424*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2425*4882a593Smuzhiyun
2426*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2427*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, offset,
2428*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2429*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2430*4882a593Smuzhiyun }
2431*4882a593Smuzhiyun
/*
 * spi transport class callback: set the synchronous offset goal for
 * the given target.  An offset of zero is passed through with a zero
 * period and no PPR options.
 */
static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo
		= ahd_fetch_transinfo(ahd,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahd_devinfo devinfo;
	unsigned int ppr_options = 0;
	unsigned int period = 0;
	/*
	 * NOTE(review): ppr_options is still 0 here, so dt is always 0 and
	 * the AHD_SYNCRATE_ULTRA2 table is always chosen below, even though
	 * ppr_options is later reloaded from tinfo->goal inside the if
	 * branch — confirm whether dt was meant to be computed after that.
	 */
	unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
	unsigned long flags;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_DV) != 0)
		printk("%s: set offset to %d\n", ahd_name(ahd), offset);
#endif

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	if (offset != 0) {
		/* Keep the existing period/PPR goals when enabling sync. */
		period = tinfo->goal.period;
		ppr_options = tinfo->goal.ppr_options;
		ahd_find_syncrate(ahd, &period, &ppr_options,
				  dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
	}

	ahd_lock(ahd, &flags);
	ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options,
			 AHD_TRANS_GOAL, FALSE);
	ahd_unlock(ahd, &flags);
}
2466*4882a593Smuzhiyun
ahd_linux_set_dt(struct scsi_target * starget,int dt)2467*4882a593Smuzhiyun static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
2468*4882a593Smuzhiyun {
2469*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2470*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2471*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2472*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2473*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2474*4882a593Smuzhiyun starget->channel + 'A',
2475*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2476*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2477*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2478*4882a593Smuzhiyun & ~MSG_EXT_PPR_DT_REQ;
2479*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2480*4882a593Smuzhiyun unsigned int width = tinfo->goal.width;
2481*4882a593Smuzhiyun unsigned long flags;
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun #ifdef AHD_DEBUG
2484*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2485*4882a593Smuzhiyun printk("%s: %s DT\n", ahd_name(ahd),
2486*4882a593Smuzhiyun dt ? "enabling" : "disabling");
2487*4882a593Smuzhiyun #endif
2488*4882a593Smuzhiyun if (dt && spi_max_width(starget)) {
2489*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_DT_REQ;
2490*4882a593Smuzhiyun if (!width)
2491*4882a593Smuzhiyun ahd_linux_set_width(starget, 1);
2492*4882a593Smuzhiyun } else {
2493*4882a593Smuzhiyun if (period <= 9)
2494*4882a593Smuzhiyun period = 10; /* If resetting DT, period must be >= 25ns */
2495*4882a593Smuzhiyun /* IU is invalid without DT set */
2496*4882a593Smuzhiyun ppr_options &= ~MSG_EXT_PPR_IU_REQ;
2497*4882a593Smuzhiyun }
2498*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2499*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2500*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2501*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2504*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2505*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2506*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2507*4882a593Smuzhiyun }
2508*4882a593Smuzhiyun
/*
 * spi transport class callback: enable or disable QAS (quick arbitration
 * and selection) in the negotiation goal for the given target.
 */
static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo
		= ahd_fetch_transinfo(ahd,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahd_devinfo devinfo;
	/* Start from the current goal with the QAS bit cleared. */
	unsigned int ppr_options = tinfo->goal.ppr_options
		& ~MSG_EXT_PPR_QAS_REQ;
	unsigned int period = tinfo->goal.period;
	unsigned int dt;
	unsigned long flags;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_DV) != 0)
		printk("%s: %s QAS\n", ahd_name(ahd),
		       qas ? "enabling" : "disabling");
#endif

	if (qas) {
		ppr_options |= MSG_EXT_PPR_QAS_REQ;
	}

	dt = ppr_options & MSG_EXT_PPR_DT_REQ;

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	ahd_find_syncrate(ahd, &period, &ppr_options,
			  dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);

	ahd_lock(ahd, &flags);
	ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
			 ppr_options, AHD_TRANS_GOAL, FALSE);
	ahd_unlock(ahd, &flags);
}
2547*4882a593Smuzhiyun
/*
 * spi transport class callback: enable or disable IU (information units,
 * i.e. packetized transfers) in the negotiation goal for the given
 * target.  Enabling IU also forces DT on, since IU requires DT.
 */
static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo
		= ahd_fetch_transinfo(ahd,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahd_devinfo devinfo;
	/* Start from the current goal with the IU bit cleared. */
	unsigned int ppr_options = tinfo->goal.ppr_options
		& ~MSG_EXT_PPR_IU_REQ;
	unsigned int period = tinfo->goal.period;
	unsigned int dt;
	unsigned long flags;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_DV) != 0)
		printk("%s: %s IU\n", ahd_name(ahd),
		       iu ? "enabling" : "disabling");
#endif

	if (iu && spi_max_width(starget)) {
		ppr_options |= MSG_EXT_PPR_IU_REQ;
		ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */
	}

	dt = ppr_options & MSG_EXT_PPR_DT_REQ;

	ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	ahd_find_syncrate(ahd, &period, &ppr_options,
			  dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);

	ahd_lock(ahd, &flags);
	ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
			 ppr_options, AHD_TRANS_GOAL, FALSE);
	ahd_unlock(ahd, &flags);
}
2587*4882a593Smuzhiyun
ahd_linux_set_rd_strm(struct scsi_target * starget,int rdstrm)2588*4882a593Smuzhiyun static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
2589*4882a593Smuzhiyun {
2590*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2591*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2592*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2593*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2594*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2595*4882a593Smuzhiyun starget->channel + 'A',
2596*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2597*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2598*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2599*4882a593Smuzhiyun & ~MSG_EXT_PPR_RD_STRM;
2600*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2601*4882a593Smuzhiyun unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2602*4882a593Smuzhiyun unsigned long flags;
2603*4882a593Smuzhiyun
2604*4882a593Smuzhiyun #ifdef AHD_DEBUG
2605*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2606*4882a593Smuzhiyun printk("%s: %s Read Streaming\n", ahd_name(ahd),
2607*4882a593Smuzhiyun rdstrm ? "enabling" : "disabling");
2608*4882a593Smuzhiyun #endif
2609*4882a593Smuzhiyun
2610*4882a593Smuzhiyun if (rdstrm && spi_max_width(starget))
2611*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_RD_STRM;
2612*4882a593Smuzhiyun
2613*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2614*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2615*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2616*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2617*4882a593Smuzhiyun
2618*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2619*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2620*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2621*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2622*4882a593Smuzhiyun }
2623*4882a593Smuzhiyun
ahd_linux_set_wr_flow(struct scsi_target * starget,int wrflow)2624*4882a593Smuzhiyun static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2627*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2628*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2629*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2630*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2631*4882a593Smuzhiyun starget->channel + 'A',
2632*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2633*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2634*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2635*4882a593Smuzhiyun & ~MSG_EXT_PPR_WR_FLOW;
2636*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2637*4882a593Smuzhiyun unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2638*4882a593Smuzhiyun unsigned long flags;
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun #ifdef AHD_DEBUG
2641*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2642*4882a593Smuzhiyun printk("%s: %s Write Flow Control\n", ahd_name(ahd),
2643*4882a593Smuzhiyun wrflow ? "enabling" : "disabling");
2644*4882a593Smuzhiyun #endif
2645*4882a593Smuzhiyun
2646*4882a593Smuzhiyun if (wrflow && spi_max_width(starget))
2647*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_WR_FLOW;
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2650*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2651*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2652*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2655*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2656*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2657*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2658*4882a593Smuzhiyun }
2659*4882a593Smuzhiyun
ahd_linux_set_rti(struct scsi_target * starget,int rti)2660*4882a593Smuzhiyun static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
2661*4882a593Smuzhiyun {
2662*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2663*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2664*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2665*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2666*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2667*4882a593Smuzhiyun starget->channel + 'A',
2668*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2669*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2670*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2671*4882a593Smuzhiyun & ~MSG_EXT_PPR_RTI;
2672*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2673*4882a593Smuzhiyun unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2674*4882a593Smuzhiyun unsigned long flags;
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun if ((ahd->features & AHD_RTI) == 0) {
2677*4882a593Smuzhiyun #ifdef AHD_DEBUG
2678*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2679*4882a593Smuzhiyun printk("%s: RTI not available\n", ahd_name(ahd));
2680*4882a593Smuzhiyun #endif
2681*4882a593Smuzhiyun return;
2682*4882a593Smuzhiyun }
2683*4882a593Smuzhiyun
2684*4882a593Smuzhiyun #ifdef AHD_DEBUG
2685*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2686*4882a593Smuzhiyun printk("%s: %s RTI\n", ahd_name(ahd),
2687*4882a593Smuzhiyun rti ? "enabling" : "disabling");
2688*4882a593Smuzhiyun #endif
2689*4882a593Smuzhiyun
2690*4882a593Smuzhiyun if (rti && spi_max_width(starget))
2691*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_RTI;
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2694*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2695*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2696*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2697*4882a593Smuzhiyun
2698*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2699*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2700*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2701*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun
ahd_linux_set_pcomp_en(struct scsi_target * starget,int pcomp)2704*4882a593Smuzhiyun static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
2705*4882a593Smuzhiyun {
2706*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2707*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2708*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2709*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2710*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2711*4882a593Smuzhiyun starget->channel + 'A',
2712*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2713*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2714*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2715*4882a593Smuzhiyun & ~MSG_EXT_PPR_PCOMP_EN;
2716*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2717*4882a593Smuzhiyun unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2718*4882a593Smuzhiyun unsigned long flags;
2719*4882a593Smuzhiyun
2720*4882a593Smuzhiyun #ifdef AHD_DEBUG
2721*4882a593Smuzhiyun if ((ahd_debug & AHD_SHOW_DV) != 0)
2722*4882a593Smuzhiyun printk("%s: %s Precompensation\n", ahd_name(ahd),
2723*4882a593Smuzhiyun pcomp ? "Enable" : "Disable");
2724*4882a593Smuzhiyun #endif
2725*4882a593Smuzhiyun
2726*4882a593Smuzhiyun if (pcomp && spi_max_width(starget)) {
2727*4882a593Smuzhiyun uint8_t precomp;
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
2730*4882a593Smuzhiyun const struct ahd_linux_iocell_opts *iocell_opts;
2731*4882a593Smuzhiyun
2732*4882a593Smuzhiyun iocell_opts = &aic79xx_iocell_info[ahd->unit];
2733*4882a593Smuzhiyun precomp = iocell_opts->precomp;
2734*4882a593Smuzhiyun } else {
2735*4882a593Smuzhiyun precomp = AIC79XX_DEFAULT_PRECOMP;
2736*4882a593Smuzhiyun }
2737*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_PCOMP_EN;
2738*4882a593Smuzhiyun AHD_SET_PRECOMP(ahd, precomp);
2739*4882a593Smuzhiyun } else {
2740*4882a593Smuzhiyun AHD_SET_PRECOMP(ahd, 0);
2741*4882a593Smuzhiyun }
2742*4882a593Smuzhiyun
2743*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2744*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2745*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2746*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2747*4882a593Smuzhiyun
2748*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2749*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2750*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2751*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun
ahd_linux_set_hold_mcs(struct scsi_target * starget,int hold)2754*4882a593Smuzhiyun static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold)
2755*4882a593Smuzhiyun {
2756*4882a593Smuzhiyun struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2757*4882a593Smuzhiyun struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
2758*4882a593Smuzhiyun struct ahd_tmode_tstate *tstate;
2759*4882a593Smuzhiyun struct ahd_initiator_tinfo *tinfo
2760*4882a593Smuzhiyun = ahd_fetch_transinfo(ahd,
2761*4882a593Smuzhiyun starget->channel + 'A',
2762*4882a593Smuzhiyun shost->this_id, starget->id, &tstate);
2763*4882a593Smuzhiyun struct ahd_devinfo devinfo;
2764*4882a593Smuzhiyun unsigned int ppr_options = tinfo->goal.ppr_options
2765*4882a593Smuzhiyun & ~MSG_EXT_PPR_HOLD_MCS;
2766*4882a593Smuzhiyun unsigned int period = tinfo->goal.period;
2767*4882a593Smuzhiyun unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
2768*4882a593Smuzhiyun unsigned long flags;
2769*4882a593Smuzhiyun
2770*4882a593Smuzhiyun if (hold && spi_max_width(starget))
2771*4882a593Smuzhiyun ppr_options |= MSG_EXT_PPR_HOLD_MCS;
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
2774*4882a593Smuzhiyun starget->channel + 'A', ROLE_INITIATOR);
2775*4882a593Smuzhiyun ahd_find_syncrate(ahd, &period, &ppr_options,
2776*4882a593Smuzhiyun dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2779*4882a593Smuzhiyun ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
2780*4882a593Smuzhiyun ppr_options, AHD_TRANS_GOAL, FALSE);
2781*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun
ahd_linux_get_signalling(struct Scsi_Host * shost)2784*4882a593Smuzhiyun static void ahd_linux_get_signalling(struct Scsi_Host *shost)
2785*4882a593Smuzhiyun {
2786*4882a593Smuzhiyun struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
2787*4882a593Smuzhiyun unsigned long flags;
2788*4882a593Smuzhiyun u8 mode;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun ahd_lock(ahd, &flags);
2791*4882a593Smuzhiyun ahd_pause(ahd);
2792*4882a593Smuzhiyun mode = ahd_inb(ahd, SBLKCTL);
2793*4882a593Smuzhiyun ahd_unpause(ahd);
2794*4882a593Smuzhiyun ahd_unlock(ahd, &flags);
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun if (mode & ENAB40)
2797*4882a593Smuzhiyun spi_signalling(shost) = SPI_SIGNAL_LVD;
2798*4882a593Smuzhiyun else if (mode & ENAB20)
2799*4882a593Smuzhiyun spi_signalling(shost) = SPI_SIGNAL_SE;
2800*4882a593Smuzhiyun else
2801*4882a593Smuzhiyun spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
2802*4882a593Smuzhiyun }
2803*4882a593Smuzhiyun
/*
 * Callbacks registered with the SCSI SPI transport class.  Each set_*
 * handler updates the per-target "goal" transfer negotiation settings;
 * the show_* flags expose the matching attribute in sysfs.
 */
static struct spi_function_template ahd_linux_transport_functions = {
	.set_offset	= ahd_linux_set_offset,
	.show_offset	= 1,
	.set_period	= ahd_linux_set_period,
	.show_period	= 1,
	.set_width	= ahd_linux_set_width,
	.show_width	= 1,
	.set_dt		= ahd_linux_set_dt,
	.show_dt	= 1,
	.set_iu		= ahd_linux_set_iu,
	.show_iu	= 1,
	.set_qas	= ahd_linux_set_qas,
	.show_qas	= 1,
	.set_rd_strm	= ahd_linux_set_rd_strm,
	.show_rd_strm	= 1,
	.set_wr_flow	= ahd_linux_set_wr_flow,
	.show_wr_flow	= 1,
	.set_rti	= ahd_linux_set_rti,
	.show_rti	= 1,
	.set_pcomp_en	= ahd_linux_set_pcomp_en,
	.show_pcomp_en	= 1,
	.set_hold_mcs	= ahd_linux_set_hold_mcs,
	.show_hold_mcs	= 1,
	.get_signalling = ahd_linux_get_signalling,
};
2829*4882a593Smuzhiyun
2830*4882a593Smuzhiyun static int __init
ahd_linux_init(void)2831*4882a593Smuzhiyun ahd_linux_init(void)
2832*4882a593Smuzhiyun {
2833*4882a593Smuzhiyun int error = 0;
2834*4882a593Smuzhiyun
2835*4882a593Smuzhiyun /*
2836*4882a593Smuzhiyun * If we've been passed any parameters, process them now.
2837*4882a593Smuzhiyun */
2838*4882a593Smuzhiyun if (aic79xx)
2839*4882a593Smuzhiyun aic79xx_setup(aic79xx);
2840*4882a593Smuzhiyun
2841*4882a593Smuzhiyun ahd_linux_transport_template =
2842*4882a593Smuzhiyun spi_attach_transport(&ahd_linux_transport_functions);
2843*4882a593Smuzhiyun if (!ahd_linux_transport_template)
2844*4882a593Smuzhiyun return -ENODEV;
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun scsi_transport_reserve_device(ahd_linux_transport_template,
2847*4882a593Smuzhiyun sizeof(struct ahd_linux_device));
2848*4882a593Smuzhiyun
2849*4882a593Smuzhiyun error = ahd_linux_pci_init();
2850*4882a593Smuzhiyun if (error)
2851*4882a593Smuzhiyun spi_release_transport(ahd_linux_transport_template);
2852*4882a593Smuzhiyun return error;
2853*4882a593Smuzhiyun }
2854*4882a593Smuzhiyun
/*
 * Module exit point: tear down all PCI instances first, then release
 * the SPI transport template they were attached to.
 */
static void __exit
ahd_linux_exit(void)
{
	ahd_linux_pci_exit();
	spi_release_transport(ahd_linux_transport_template);
}
2861*4882a593Smuzhiyun
/* Register the driver's module entry and exit points with the kernel. */
module_init(ahd_linux_init);
module_exit(ahd_linux_exit);
2864