1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /******************************************************************************
3*4882a593Smuzhiyun * QLOGIC LINUX SOFTWARE
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
6*4882a593Smuzhiyun * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
7*4882a593Smuzhiyun * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
8*4882a593Smuzhiyun * Copyright (C) 2003-2004 Christoph Hellwig
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun ******************************************************************************/
11*4882a593Smuzhiyun #define QLA1280_VERSION "3.27.1"
12*4882a593Smuzhiyun /*****************************************************************************
13*4882a593Smuzhiyun Revision History:
14*4882a593Smuzhiyun Rev 3.27.1, February 8, 2010, Michael Reed
15*4882a593Smuzhiyun - Retain firmware image for error recovery.
16*4882a593Smuzhiyun Rev 3.27, February 10, 2009, Michael Reed
17*4882a593Smuzhiyun - General code cleanup.
18*4882a593Smuzhiyun - Improve error recovery.
19*4882a593Smuzhiyun Rev 3.26, January 16, 2006 Jes Sorensen
20*4882a593Smuzhiyun - Ditch all < 2.6 support
21*4882a593Smuzhiyun Rev 3.25.1, February 10, 2005 Christoph Hellwig
22*4882a593Smuzhiyun - use pci_map_single to map non-S/G requests
23*4882a593Smuzhiyun - remove qla1280_proc_info
24*4882a593Smuzhiyun Rev 3.25, September 28, 2004, Christoph Hellwig
25*4882a593Smuzhiyun - add support for ISP1020/1040
26*4882a593Smuzhiyun - don't include "scsi.h" anymore for 2.6.x
27*4882a593Smuzhiyun Rev 3.24.4 June 7, 2004 Christoph Hellwig
28*4882a593Smuzhiyun - restructure firmware loading, cleanup initialization code
29*4882a593Smuzhiyun - prepare support for ISP1020/1040 chips
30*4882a593Smuzhiyun Rev 3.24.3 January 19, 2004, Jes Sorensen
31*4882a593Smuzhiyun - Handle PCI DMA mask settings correctly
32*4882a593Smuzhiyun - Correct order of error handling in probe_one, free_irq should not
33*4882a593Smuzhiyun be called if request_irq failed
34*4882a593Smuzhiyun Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
35*4882a593Smuzhiyun - Big endian fixes (James)
36*4882a593Smuzhiyun - Remove bogus IOCB content on zero data transfer commands (Andrew)
37*4882a593Smuzhiyun Rev 3.24.1 January 5, 2004, Jes Sorensen
38*4882a593Smuzhiyun - Initialize completion queue to avoid OOPS on probe
39*4882a593Smuzhiyun - Handle interrupts during mailbox testing
40*4882a593Smuzhiyun Rev 3.24 November 17, 2003, Christoph Hellwig
41*4882a593Smuzhiyun - use struct list_head for completion queue
42*4882a593Smuzhiyun - avoid old Scsi_FOO typedefs
43*4882a593Smuzhiyun - cleanup 2.4 compat glue a bit
44*4882a593Smuzhiyun - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
45*4882a593Smuzhiyun - make initialization for memory mapped vs port I/O more similar
46*4882a593Smuzhiyun - remove broken pci config space manipulation
47*4882a593Smuzhiyun - kill more cruft
48*4882a593Smuzhiyun - this is an almost perfect 2.6 scsi driver now! ;)
49*4882a593Smuzhiyun Rev 3.23.39 December 17, 2003, Jes Sorensen
    Rev  3.23.39 December 17, 2003, Jes Sorensen
	- Delete completion queue from srb if mailbox command failed
	  to avoid qla1280_done completing qla1280_error_action's
	  obsolete context
53*4882a593Smuzhiyun - Reduce arguments for qla1280_done
54*4882a593Smuzhiyun Rev 3.23.38 October 18, 2003, Christoph Hellwig
55*4882a593Smuzhiyun - Convert to new-style hotplugable driver for 2.6
56*4882a593Smuzhiyun - Fix missing scsi_unregister/scsi_host_put on HBA removal
57*4882a593Smuzhiyun - Kill some more cruft
58*4882a593Smuzhiyun Rev 3.23.37 October 1, 2003, Jes Sorensen
59*4882a593Smuzhiyun - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
60*4882a593Smuzhiyun random CONFIG option
61*4882a593Smuzhiyun - Clean up locking in probe path
62*4882a593Smuzhiyun Rev 3.23.36 October 1, 2003, Christoph Hellwig
63*4882a593Smuzhiyun - queuecommand only ever receives new commands - clear flags
64*4882a593Smuzhiyun - Reintegrate lost fixes from Linux 2.5
65*4882a593Smuzhiyun Rev 3.23.35 August 14, 2003, Jes Sorensen
66*4882a593Smuzhiyun - Build against 2.6
67*4882a593Smuzhiyun Rev 3.23.34 July 23, 2003, Jes Sorensen
68*4882a593Smuzhiyun - Remove pointless TRUE/FALSE macros
69*4882a593Smuzhiyun - Clean up vchan handling
70*4882a593Smuzhiyun Rev 3.23.33 July 3, 2003, Jes Sorensen
71*4882a593Smuzhiyun - Don't define register access macros before define determining MMIO.
72*4882a593Smuzhiyun This just happened to work out on ia64 but not elsewhere.
73*4882a593Smuzhiyun - Don't try and read from the card while it is in reset as
74*4882a593Smuzhiyun it won't respond and causes an MCA
75*4882a593Smuzhiyun Rev 3.23.32 June 23, 2003, Jes Sorensen
76*4882a593Smuzhiyun - Basic support for boot time arguments
77*4882a593Smuzhiyun Rev 3.23.31 June 8, 2003, Jes Sorensen
78*4882a593Smuzhiyun - Reduce boot time messages
79*4882a593Smuzhiyun Rev 3.23.30 June 6, 2003, Jes Sorensen
80*4882a593Smuzhiyun - Do not enable sync/wide/ppr before it has been determined
81*4882a593Smuzhiyun that the target device actually supports it
82*4882a593Smuzhiyun - Enable DMA arbitration for multi channel controllers
83*4882a593Smuzhiyun Rev 3.23.29 June 3, 2003, Jes Sorensen
84*4882a593Smuzhiyun - Port to 2.5.69
85*4882a593Smuzhiyun Rev 3.23.28 June 3, 2003, Jes Sorensen
86*4882a593Smuzhiyun - Eliminate duplicate marker commands on bus resets
87*4882a593Smuzhiyun - Handle outstanding commands appropriately on bus/device resets
88*4882a593Smuzhiyun Rev 3.23.27 May 28, 2003, Jes Sorensen
89*4882a593Smuzhiyun - Remove bogus input queue code, let the Linux SCSI layer do the work
90*4882a593Smuzhiyun - Clean up NVRAM handling, only read it once from the card
91*4882a593Smuzhiyun - Add a number of missing default nvram parameters
92*4882a593Smuzhiyun Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
93*4882a593Smuzhiyun - Use completion queue for mailbox commands instead of busy wait
94*4882a593Smuzhiyun Rev 3.23.25 Beta May 27, 2003, James Bottomley
95*4882a593Smuzhiyun - Migrate to use new error handling code
96*4882a593Smuzhiyun Rev 3.23.24 Beta May 21, 2003, James Bottomley
97*4882a593Smuzhiyun - Big endian support
98*4882a593Smuzhiyun - Cleanup data direction code
99*4882a593Smuzhiyun Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
100*4882a593Smuzhiyun - Switch to using MMIO instead of PIO
101*4882a593Smuzhiyun Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
102*4882a593Smuzhiyun - Fix PCI parity problem with 12160 during reset.
103*4882a593Smuzhiyun Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
104*4882a593Smuzhiyun - Use pci_map_page()/pci_unmap_page() instead of map_single version.
105*4882a593Smuzhiyun Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
106*4882a593Smuzhiyun - Remove < 2.4.x support
107*4882a593Smuzhiyun - Introduce HOST_LOCK to make the spin lock changes portable.
108*4882a593Smuzhiyun - Remove a bunch of idiotic and unnecessary typedef's
109*4882a593Smuzhiyun - Kill all leftovers of target-mode support which never worked anyway
110*4882a593Smuzhiyun Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
111*4882a593Smuzhiyun - Do qla1280_pci_config() before calling request_irq() and
112*4882a593Smuzhiyun request_region()
113*4882a593Smuzhiyun - Use pci_dma_hi32() to handle upper word of DMA addresses instead
114*4882a593Smuzhiyun of large shifts
115*4882a593Smuzhiyun - Hand correct arguments to free_irq() in case of failure
116*4882a593Smuzhiyun Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
117*4882a593Smuzhiyun - Run source through Lindent and clean up the output
118*4882a593Smuzhiyun Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
119*4882a593Smuzhiyun - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
120*4882a593Smuzhiyun Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
121*4882a593Smuzhiyun - Rely on mailbox commands generating interrupts - do not
122*4882a593Smuzhiyun run qla1280_isr() from ql1280_mailbox_command()
123*4882a593Smuzhiyun - Remove device_reg_t
124*4882a593Smuzhiyun - Integrate ql12160_set_target_parameters() with 1280 version
125*4882a593Smuzhiyun - Make qla1280_setup() non static
126*4882a593Smuzhiyun - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
127*4882a593Smuzhiyun sent to the card - this command pauses the firmware!!!
128*4882a593Smuzhiyun Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
129*4882a593Smuzhiyun - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
130*4882a593Smuzhiyun - Remove a pile of pointless and confusing (srb_t **) and
131*4882a593Smuzhiyun (scsi_lu_t *) typecasts
132*4882a593Smuzhiyun - Explicit mark that we do not use the new error handling (for now)
133*4882a593Smuzhiyun - Remove scsi_qla_host_t and use 'struct' instead
134*4882a593Smuzhiyun - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
135*4882a593Smuzhiyun pci_64bit_slot flags which weren't used for anything anyway
136*4882a593Smuzhiyun - Grab host->host_lock while calling qla1280_isr() from abort()
137*4882a593Smuzhiyun - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
138*4882a593Smuzhiyun do not need to save/restore flags in the interrupt handler
139*4882a593Smuzhiyun - Enable interrupts early (before any mailbox access) in preparation
140*4882a593Smuzhiyun for cleaning up the mailbox handling
141*4882a593Smuzhiyun Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
142*4882a593Smuzhiyun - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
143*4882a593Smuzhiyun it with proper use of dprintk().
144*4882a593Smuzhiyun - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
145*4882a593Smuzhiyun a debug level argument to determine if data is to be printed
146*4882a593Smuzhiyun - Add KERN_* info to printk()
147*4882a593Smuzhiyun Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
148*4882a593Smuzhiyun - Significant cosmetic cleanups
149*4882a593Smuzhiyun - Change debug code to use dprintk() and remove #if mess
150*4882a593Smuzhiyun Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
151*4882a593Smuzhiyun - More cosmetic cleanups, fix places treating return as function
152*4882a593Smuzhiyun - use cpu_relax() in qla1280_debounce_register()
153*4882a593Smuzhiyun Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
154*4882a593Smuzhiyun - Make it compile under 2.5.5
155*4882a593Smuzhiyun Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
156*4882a593Smuzhiyun - Do no typecast short * to long * in QL1280BoardTbl, this
157*4882a593Smuzhiyun broke miserably on big endian boxes
158*4882a593Smuzhiyun Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
159*4882a593Smuzhiyun - Remove pre 2.2 hack for checking for reentrance in interrupt handler
160*4882a593Smuzhiyun - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
161*4882a593Smuzhiyun unsigned int to match the types from struct scsi_cmnd
162*4882a593Smuzhiyun Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
163*4882a593Smuzhiyun - Remove bogus timer_t typedef from qla1280.h
164*4882a593Smuzhiyun - Remove obsolete pre 2.2 PCI setup code, use proper #define's
165*4882a593Smuzhiyun for PCI_ values, call pci_set_master()
166*4882a593Smuzhiyun - Fix memleak of qla1280_buffer on module unload
167*4882a593Smuzhiyun - Only compile module parsing code #ifdef MODULE - should be
168*4882a593Smuzhiyun changed to use individual MODULE_PARM's later
169*4882a593Smuzhiyun - Remove dummy_buffer that was never modified nor printed
170*4882a593Smuzhiyun - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
171*4882a593Smuzhiyun #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
172*4882a593Smuzhiyun - Remove \r from print statements, this is Linux, not DOS
173*4882a593Smuzhiyun - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
174*4882a593Smuzhiyun dummy macros
175*4882a593Smuzhiyun - Remove C++ compile hack in header file as Linux driver are not
176*4882a593Smuzhiyun supposed to be compiled as C++
177*4882a593Smuzhiyun - Kill MS_64BITS macro as it makes the code more readable
178*4882a593Smuzhiyun - Remove unnecessary flags.in_interrupts bit
179*4882a593Smuzhiyun Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
	- Don't check for set flags on q->q_flag one by one in qla1280_next()
181*4882a593Smuzhiyun - Check whether the interrupt was generated by the QLA1280 before
182*4882a593Smuzhiyun doing any processing
183*4882a593Smuzhiyun - qla1280_status_entry(): Only zero out part of sense_buffer that
184*4882a593Smuzhiyun is not being copied into
	- Remove more superfluous typecasts
186*4882a593Smuzhiyun - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
187*4882a593Smuzhiyun Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
188*4882a593Smuzhiyun - Don't walk the entire list in qla1280_putq_t() just to directly
189*4882a593Smuzhiyun grab the pointer to the last element afterwards
190*4882a593Smuzhiyun Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
	- Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
192*4882a593Smuzhiyun Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
193*4882a593Smuzhiyun - Set dev->max_sectors to 1024
194*4882a593Smuzhiyun Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
195*4882a593Smuzhiyun - Provide compat macros for pci_enable_device(), pci_find_subsys()
196*4882a593Smuzhiyun and scsi_set_pci_device()
197*4882a593Smuzhiyun - Call scsi_set_pci_device() for all devices
198*4882a593Smuzhiyun - Reduce size of kernel version dependent device probe code
199*4882a593Smuzhiyun - Move duplicate probe/init code to separate function
200*4882a593Smuzhiyun - Handle error if qla1280_mem_alloc() fails
201*4882a593Smuzhiyun - Kill OFFSET() macro and use Linux's PCI definitions instead
202*4882a593Smuzhiyun - Kill private structure defining PCI config space (struct config_reg)
203*4882a593Smuzhiyun - Only allocate I/O port region if not in MMIO mode
	- Remove duplicate (unused) sanity check of size of srb_t
205*4882a593Smuzhiyun Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
206*4882a593Smuzhiyun - Change home-brew memset() implementations to use memset()
207*4882a593Smuzhiyun - Remove all references to COMTRACE() - accessing a PC's COM2 serial
208*4882a593Smuzhiyun port directly is not legal under Linux.
209*4882a593Smuzhiyun Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
210*4882a593Smuzhiyun - Remove pre 2.2 kernel support
211*4882a593Smuzhiyun - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
212*4882a593Smuzhiyun - Fix MMIO access to use readl/writel instead of directly
213*4882a593Smuzhiyun dereferencing pointers
214*4882a593Smuzhiyun - Nuke MSDOS debugging code
215*4882a593Smuzhiyun - Change true/false data types to int from uint8_t
216*4882a593Smuzhiyun - Use int for counters instead of uint8_t etc.
217*4882a593Smuzhiyun - Clean up size & byte order conversion macro usage
218*4882a593Smuzhiyun Rev 3.23 Beta January 11, 2001 BN Qlogic
219*4882a593Smuzhiyun - Added check of device_id when handling non
220*4882a593Smuzhiyun QLA12160s during detect().
221*4882a593Smuzhiyun Rev 3.22 Beta January 5, 2001 BN Qlogic
222*4882a593Smuzhiyun - Changed queue_task() to schedule_task()
223*4882a593Smuzhiyun for kernels 2.4.0 and higher.
224*4882a593Smuzhiyun Note: 2.4.0-testxx kernels released prior to
225*4882a593Smuzhiyun the actual 2.4.0 kernel release on January 2001
226*4882a593Smuzhiyun will get compile/link errors with schedule_task().
227*4882a593Smuzhiyun Please update your kernel to released 2.4.0 level,
228*4882a593Smuzhiyun or comment lines in this file flagged with 3.22
229*4882a593Smuzhiyun to resolve compile/link error of schedule_task().
230*4882a593Smuzhiyun - Added -DCONFIG_SMP in addition to -D__SMP__
231*4882a593Smuzhiyun in Makefile for 2.4.0 builds of driver as module.
232*4882a593Smuzhiyun Rev 3.21 Beta January 4, 2001 BN Qlogic
233*4882a593Smuzhiyun - Changed criteria of 64/32 Bit mode of HBA
234*4882a593Smuzhiyun operation according to BITS_PER_LONG rather
235*4882a593Smuzhiyun than HBA's NVRAM setting of >4Gig memory bit;
236*4882a593Smuzhiyun so that the HBA auto-configures without the need
237*4882a593Smuzhiyun to setup each system individually.
238*4882a593Smuzhiyun Rev 3.20 Beta December 5, 2000 BN Qlogic
239*4882a593Smuzhiyun - Added priority handling to IA-64 onboard SCSI
240*4882a593Smuzhiyun ISP12160 chip for kernels greater than 2.3.18.
241*4882a593Smuzhiyun - Added irqrestore for qla1280_intr_handler.
242*4882a593Smuzhiyun - Enabled /proc/scsi/qla1280 interface.
243*4882a593Smuzhiyun - Clear /proc/scsi/qla1280 counters in detect().
244*4882a593Smuzhiyun Rev 3.19 Beta October 13, 2000 BN Qlogic
245*4882a593Smuzhiyun - Declare driver_template for new kernel
246*4882a593Smuzhiyun (2.4.0 and greater) scsi initialization scheme.
247*4882a593Smuzhiyun - Update /proc/scsi entry for 2.3.18 kernels and
248*4882a593Smuzhiyun above as qla1280
249*4882a593Smuzhiyun Rev 3.18 Beta October 10, 2000 BN Qlogic
250*4882a593Smuzhiyun - Changed scan order of adapters to map
251*4882a593Smuzhiyun the QLA12160 followed by the QLA1280.
252*4882a593Smuzhiyun Rev 3.17 Beta September 18, 2000 BN Qlogic
253*4882a593Smuzhiyun - Removed warnings for 32 bit 2.4.x compiles
254*4882a593Smuzhiyun - Corrected declared size for request and response
255*4882a593Smuzhiyun DMA addresses that are kept in each ha
256*4882a593Smuzhiyun Rev. 3.16 Beta August 25, 2000 BN Qlogic
257*4882a593Smuzhiyun - Corrected 64 bit addressing issue on IA-64
258*4882a593Smuzhiyun where the upper 32 bits were not properly
259*4882a593Smuzhiyun passed to the RISC engine.
260*4882a593Smuzhiyun Rev. 3.15 Beta August 22, 2000 BN Qlogic
261*4882a593Smuzhiyun - Modified qla1280_setup_chip to properly load
262*4882a593Smuzhiyun ISP firmware for greater that 4 Gig memory on IA-64
263*4882a593Smuzhiyun Rev. 3.14 Beta August 16, 2000 BN Qlogic
264*4882a593Smuzhiyun - Added setting of dma_mask to full 64 bit
265*4882a593Smuzhiyun if flags.enable_64bit_addressing is set in NVRAM
266*4882a593Smuzhiyun Rev. 3.13 Beta August 16, 2000 BN Qlogic
267*4882a593Smuzhiyun - Use new PCI DMA mapping APIs for 2.4.x kernel
268*4882a593Smuzhiyun Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
269*4882a593Smuzhiyun - Added check of pci_enable_device to detect() for 2.3.x
270*4882a593Smuzhiyun - Use pci_resource_start() instead of
271*4882a593Smuzhiyun pdev->resource[0].start in detect() for 2.3.x
272*4882a593Smuzhiyun - Updated driver version
273*4882a593Smuzhiyun Rev. 3.11 July 14, 2000 BN Qlogic
274*4882a593Smuzhiyun - Updated SCSI Firmware to following versions:
275*4882a593Smuzhiyun qla1x80: 8.13.08
276*4882a593Smuzhiyun qla1x160: 10.04.08
277*4882a593Smuzhiyun - Updated driver version to 3.11
278*4882a593Smuzhiyun Rev. 3.10 June 23, 2000 BN Qlogic
279*4882a593Smuzhiyun - Added filtering of AMI SubSys Vendor ID devices
280*4882a593Smuzhiyun Rev. 3.9
281*4882a593Smuzhiyun - DEBUG_QLA1280 undefined and new version BN Qlogic
282*4882a593Smuzhiyun Rev. 3.08b May 9, 2000 MD Dell
283*4882a593Smuzhiyun - Added logic to check against AMI subsystem vendor ID
284*4882a593Smuzhiyun Rev. 3.08 May 4, 2000 DG Qlogic
285*4882a593Smuzhiyun - Added logic to check for PCI subsystem ID.
286*4882a593Smuzhiyun Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
287*4882a593Smuzhiyun - Updated SCSI Firmware to following versions:
288*4882a593Smuzhiyun qla12160: 10.01.19
289*4882a593Smuzhiyun qla1280: 8.09.00
290*4882a593Smuzhiyun Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
291*4882a593Smuzhiyun - Internal revision; not released
292*4882a593Smuzhiyun Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
293*4882a593Smuzhiyun - Edit correction for virt_to_bus and PROC.
294*4882a593Smuzhiyun Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
295*4882a593Smuzhiyun - Merge changes from ia64 port.
296*4882a593Smuzhiyun Rev. 3.03 Mar 28, 2000 BN Qlogic
297*4882a593Smuzhiyun - Increase version to reflect new code drop with compile fix
298*4882a593Smuzhiyun of issue with inclusion of linux/spinlock for 2.3 kernels
299*4882a593Smuzhiyun Rev. 3.02 Mar 15, 2000 BN Qlogic
300*4882a593Smuzhiyun - Merge qla1280_proc_info from 2.10 code base
301*4882a593Smuzhiyun Rev. 3.01 Feb 10, 2000 BN Qlogic
302*4882a593Smuzhiyun - Corrected code to compile on a 2.2.x kernel.
303*4882a593Smuzhiyun Rev. 3.00 Jan 17, 2000 DG Qlogic
304*4882a593Smuzhiyun - Added 64-bit support.
305*4882a593Smuzhiyun Rev. 2.07 Nov 9, 1999 DG Qlogic
306*4882a593Smuzhiyun - Added new routine to set target parameters for ISP12160.
307*4882a593Smuzhiyun Rev. 2.06 Sept 10, 1999 DG Qlogic
308*4882a593Smuzhiyun - Added support for ISP12160 Ultra 3 chip.
309*4882a593Smuzhiyun Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
310*4882a593Smuzhiyun - Modified code to remove errors generated when compiling with
311*4882a593Smuzhiyun Cygnus IA64 Compiler.
312*4882a593Smuzhiyun - Changed conversion of pointers to unsigned longs instead of integers.
313*4882a593Smuzhiyun - Changed type of I/O port variables from uint32_t to unsigned long.
314*4882a593Smuzhiyun - Modified OFFSET macro to work with 64-bit as well as 32-bit.
315*4882a593Smuzhiyun - Changed sprintf and printk format specifiers for pointers to %p.
316*4882a593Smuzhiyun - Changed some int to long type casts where needed in sprintf & printk.
317*4882a593Smuzhiyun - Added l modifiers to sprintf and printk format specifiers for longs.
318*4882a593Smuzhiyun - Removed unused local variables.
319*4882a593Smuzhiyun Rev. 1.20 June 8, 1999 DG, Qlogic
320*4882a593Smuzhiyun Changes to support RedHat release 6.0 (kernel 2.2.5).
321*4882a593Smuzhiyun - Added SCSI exclusive access lock (io_request_lock) when accessing
322*4882a593Smuzhiyun the adapter.
323*4882a593Smuzhiyun - Added changes for the new LINUX interface template. Some new error
324*4882a593Smuzhiyun handling routines have been added to the template, but for now we
325*4882a593Smuzhiyun will use the old ones.
326*4882a593Smuzhiyun - Initial Beta Release.
327*4882a593Smuzhiyun *****************************************************************************/
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun #include <linux/module.h>
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun #include <linux/types.h>
333*4882a593Smuzhiyun #include <linux/string.h>
334*4882a593Smuzhiyun #include <linux/errno.h>
335*4882a593Smuzhiyun #include <linux/kernel.h>
336*4882a593Smuzhiyun #include <linux/ioport.h>
337*4882a593Smuzhiyun #include <linux/delay.h>
338*4882a593Smuzhiyun #include <linux/timer.h>
339*4882a593Smuzhiyun #include <linux/pci.h>
340*4882a593Smuzhiyun #include <linux/proc_fs.h>
341*4882a593Smuzhiyun #include <linux/stat.h>
342*4882a593Smuzhiyun #include <linux/pci_ids.h>
343*4882a593Smuzhiyun #include <linux/interrupt.h>
344*4882a593Smuzhiyun #include <linux/init.h>
345*4882a593Smuzhiyun #include <linux/dma-mapping.h>
346*4882a593Smuzhiyun #include <linux/firmware.h>
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun #include <asm/io.h>
349*4882a593Smuzhiyun #include <asm/irq.h>
350*4882a593Smuzhiyun #include <asm/byteorder.h>
351*4882a593Smuzhiyun #include <asm/processor.h>
352*4882a593Smuzhiyun #include <asm/types.h>
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun #include <scsi/scsi.h>
355*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
356*4882a593Smuzhiyun #include <scsi/scsi_device.h>
357*4882a593Smuzhiyun #include <scsi/scsi_host.h>
358*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun /*
362*4882a593Smuzhiyun * Compile time Options:
363*4882a593Smuzhiyun * 0 - Disable and 1 - Enable
364*4882a593Smuzhiyun */
365*4882a593Smuzhiyun #define DEBUG_QLA1280_INTR 0
366*4882a593Smuzhiyun #define DEBUG_PRINT_NVRAM 0
367*4882a593Smuzhiyun #define DEBUG_QLA1280 0
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun #define MEMORY_MAPPED_IO 1
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun #include "qla1280.h"
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
374*4882a593Smuzhiyun #define QLA_64BIT_PTR 1
375*4882a593Smuzhiyun #endif
376*4882a593Smuzhiyun
#define NVRAM_DELAY() udelay(500) /* 500 microseconds */
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
380*4882a593Smuzhiyun #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
381*4882a593Smuzhiyun ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
382*4882a593Smuzhiyun #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
383*4882a593Smuzhiyun ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
387*4882a593Smuzhiyun static void qla1280_remove_one(struct pci_dev *);
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun /*
390*4882a593Smuzhiyun * QLogic Driver Support Function Prototypes.
391*4882a593Smuzhiyun */
392*4882a593Smuzhiyun static void qla1280_done(struct scsi_qla_host *);
393*4882a593Smuzhiyun static int qla1280_get_token(char *);
394*4882a593Smuzhiyun static int qla1280_setup(char *s) __init;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun /*
397*4882a593Smuzhiyun * QLogic ISP1280 Hardware Support Function Prototypes.
398*4882a593Smuzhiyun */
399*4882a593Smuzhiyun static int qla1280_load_firmware(struct scsi_qla_host *);
400*4882a593Smuzhiyun static int qla1280_init_rings(struct scsi_qla_host *);
401*4882a593Smuzhiyun static int qla1280_nvram_config(struct scsi_qla_host *);
402*4882a593Smuzhiyun static int qla1280_mailbox_command(struct scsi_qla_host *,
403*4882a593Smuzhiyun uint8_t, uint16_t *);
404*4882a593Smuzhiyun static int qla1280_bus_reset(struct scsi_qla_host *, int);
405*4882a593Smuzhiyun static int qla1280_device_reset(struct scsi_qla_host *, int, int);
406*4882a593Smuzhiyun static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
407*4882a593Smuzhiyun static int qla1280_abort_isp(struct scsi_qla_host *);
408*4882a593Smuzhiyun #ifdef QLA_64BIT_PTR
409*4882a593Smuzhiyun static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
410*4882a593Smuzhiyun #else
411*4882a593Smuzhiyun static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
412*4882a593Smuzhiyun #endif
413*4882a593Smuzhiyun static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
414*4882a593Smuzhiyun static void qla1280_poll(struct scsi_qla_host *);
415*4882a593Smuzhiyun static void qla1280_reset_adapter(struct scsi_qla_host *);
416*4882a593Smuzhiyun static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
417*4882a593Smuzhiyun static void qla1280_isp_cmd(struct scsi_qla_host *);
418*4882a593Smuzhiyun static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
419*4882a593Smuzhiyun static void qla1280_rst_aen(struct scsi_qla_host *);
420*4882a593Smuzhiyun static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
421*4882a593Smuzhiyun struct list_head *);
422*4882a593Smuzhiyun static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
423*4882a593Smuzhiyun struct list_head *);
424*4882a593Smuzhiyun static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
425*4882a593Smuzhiyun static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
426*4882a593Smuzhiyun static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
427*4882a593Smuzhiyun static request_t *qla1280_req_pkt(struct scsi_qla_host *);
428*4882a593Smuzhiyun static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
429*4882a593Smuzhiyun unsigned int);
430*4882a593Smuzhiyun static void qla1280_get_target_parameters(struct scsi_qla_host *,
431*4882a593Smuzhiyun struct scsi_device *);
432*4882a593Smuzhiyun static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun static struct qla_driver_setup driver_setup;
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun /*
438*4882a593Smuzhiyun * convert scsi data direction to request_t control flags
439*4882a593Smuzhiyun */
440*4882a593Smuzhiyun static inline uint16_t
qla1280_data_direction(struct scsi_cmnd * cmnd)441*4882a593Smuzhiyun qla1280_data_direction(struct scsi_cmnd *cmnd)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun switch(cmnd->sc_data_direction) {
444*4882a593Smuzhiyun case DMA_FROM_DEVICE:
445*4882a593Smuzhiyun return BIT_5;
446*4882a593Smuzhiyun case DMA_TO_DEVICE:
447*4882a593Smuzhiyun return BIT_6;
448*4882a593Smuzhiyun case DMA_BIDIRECTIONAL:
449*4882a593Smuzhiyun return BIT_5 | BIT_6;
450*4882a593Smuzhiyun /*
451*4882a593Smuzhiyun * We could BUG() on default here if one of the four cases aren't
452*4882a593Smuzhiyun * met, but then again if we receive something like that from the
453*4882a593Smuzhiyun * SCSI layer we have more serious problems. This shuts up GCC.
454*4882a593Smuzhiyun */
455*4882a593Smuzhiyun case DMA_NONE:
456*4882a593Smuzhiyun default:
457*4882a593Smuzhiyun return 0;
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun
461*4882a593Smuzhiyun #if DEBUG_QLA1280
462*4882a593Smuzhiyun static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
463*4882a593Smuzhiyun static void __qla1280_dump_buffer(char *, int);
464*4882a593Smuzhiyun #endif
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun /*
468*4882a593Smuzhiyun * insmod needs to find the variable and make it point to something
469*4882a593Smuzhiyun */
470*4882a593Smuzhiyun #ifdef MODULE
471*4882a593Smuzhiyun static char *qla1280;
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun /* insmod qla1280 options=verbose" */
474*4882a593Smuzhiyun module_param(qla1280, charp, 0);
475*4882a593Smuzhiyun #else
476*4882a593Smuzhiyun __setup("qla1280=", qla1280_setup);
477*4882a593Smuzhiyun #endif
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun /*
481*4882a593Smuzhiyun * We use the scsi_pointer structure that's included with each scsi_command
482*4882a593Smuzhiyun * to overlay our struct srb over it. qla1280_init() checks that a srb is not
483*4882a593Smuzhiyun * bigger than a scsi_pointer.
484*4882a593Smuzhiyun */
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun #define CMD_SP(Cmnd) &Cmnd->SCp
487*4882a593Smuzhiyun #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
488*4882a593Smuzhiyun #define CMD_CDBP(Cmnd) Cmnd->cmnd
489*4882a593Smuzhiyun #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
490*4882a593Smuzhiyun #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
491*4882a593Smuzhiyun #define CMD_RESULT(Cmnd) Cmnd->result
492*4882a593Smuzhiyun #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
493*4882a593Smuzhiyun #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun #define CMD_HOST(Cmnd) Cmnd->device->host
496*4882a593Smuzhiyun #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
497*4882a593Smuzhiyun #define SCSI_TCN_32(Cmnd) Cmnd->device->id
498*4882a593Smuzhiyun #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun /*****************************************/
502*4882a593Smuzhiyun /* ISP Boards supported by this driver */
503*4882a593Smuzhiyun /*****************************************/
504*4882a593Smuzhiyun
/*
 * Per-board-type description, one entry per supported ISP variant;
 * instances live in ql1280_board_tbl below and are selected via the
 * driver_data index stored in qla1280_pci_tbl.
 */
struct qla_boards {
	char *name;	/* Board ID String */
	int numPorts;	/* Number of SCSI ports */
	int fw_index;	/* index into qla1280_fw_tbl for firmware */
};
510*4882a593Smuzhiyun
/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* -> QLA12160 */
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},	/* -> QLA1040  */
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},	/* -> QLA1080  */
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},	/* -> QLA1240  */
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},	/* -> QLA1280  */
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},	/* -> QLA10160 */
	{0,}						/* all-zero terminator */
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
528*4882a593Smuzhiyun
/* Serializes loading/lookup of the cached firmware images below. */
static DEFINE_MUTEX(qla1280_firmware_mutex);

/*
 * Maps a request_firmware() file name to its cached image.  The image
 * pointer starts NULL and is filled in once the blob has been loaded
 * (retained for error recovery, see the 3.27.1 changelog entry).
 */
struct qla_fw {
	char *fwname;
	const struct firmware *fw;
};

#define QL_NUM_FW_IMAGES 3

static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin",  NULL},	/* image 0 */
	{"qlogic/1280.bin",  NULL},	/* image 1 */
	{"qlogic/12160.bin", NULL},	/* image 2 */
};
543*4882a593Smuzhiyun
/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	/* Sentinel: unknown board, no ports, no firmware image. */
	{.name = "        ", .numPorts = 0, .fw_index = -1},
};
554*4882a593Smuzhiyun
/* Gates the driver's informational printk()s. */
static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
/*
 * Debug helpers.  All are wrapped in do { } while(0) so that a bare
 * "if" inside the expansion cannot capture an "else" at the call site
 * (the dump/print macros previously expanded to an unguarded "if",
 * which is the classic dangling-else macro hazard).
 */
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size)	\
	do { if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size); } while(0)
#define qla1280_print_scsi_cmd(level, cmd)	\
	do { if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd); } while(0)
#else
#define ql_debug_level			0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

/* Function entry/exit tracing; *_INTR variants log at a higher level. */
#define ENTER(x)		dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)		dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)	dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)	dprintk(4, "qla1280 : Leaving %s()\n", x);
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun
qla1280_read_nvram(struct scsi_qla_host * ha)578*4882a593Smuzhiyun static int qla1280_read_nvram(struct scsi_qla_host *ha)
579*4882a593Smuzhiyun {
580*4882a593Smuzhiyun uint16_t *wptr;
581*4882a593Smuzhiyun uint8_t chksum;
582*4882a593Smuzhiyun int cnt, i;
583*4882a593Smuzhiyun struct nvram *nv;
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun ENTER("qla1280_read_nvram");
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun if (driver_setup.no_nvram)
588*4882a593Smuzhiyun return 1;
589*4882a593Smuzhiyun
590*4882a593Smuzhiyun printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun wptr = (uint16_t *)&ha->nvram;
593*4882a593Smuzhiyun nv = &ha->nvram;
594*4882a593Smuzhiyun chksum = 0;
595*4882a593Smuzhiyun for (cnt = 0; cnt < 3; cnt++) {
596*4882a593Smuzhiyun *wptr = qla1280_get_nvram_word(ha, cnt);
597*4882a593Smuzhiyun chksum += *wptr & 0xff;
598*4882a593Smuzhiyun chksum += (*wptr >> 8) & 0xff;
599*4882a593Smuzhiyun wptr++;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun if (nv->id0 != 'I' || nv->id1 != 'S' ||
603*4882a593Smuzhiyun nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
604*4882a593Smuzhiyun dprintk(2, "Invalid nvram ID or version!\n");
605*4882a593Smuzhiyun chksum = 1;
606*4882a593Smuzhiyun } else {
607*4882a593Smuzhiyun for (; cnt < sizeof(struct nvram); cnt++) {
608*4882a593Smuzhiyun *wptr = qla1280_get_nvram_word(ha, cnt);
609*4882a593Smuzhiyun chksum += *wptr & 0xff;
610*4882a593Smuzhiyun chksum += (*wptr >> 8) & 0xff;
611*4882a593Smuzhiyun wptr++;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun }
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
616*4882a593Smuzhiyun " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
617*4882a593Smuzhiyun nv->version);
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun if (chksum) {
621*4882a593Smuzhiyun if (!driver_setup.no_nvram)
622*4882a593Smuzhiyun printk(KERN_WARNING "scsi(%ld): Unable to identify or "
623*4882a593Smuzhiyun "validate NVRAM checksum, using default "
624*4882a593Smuzhiyun "settings\n", ha->host_no);
625*4882a593Smuzhiyun ha->nvram_valid = 0;
626*4882a593Smuzhiyun } else
627*4882a593Smuzhiyun ha->nvram_valid = 1;
628*4882a593Smuzhiyun
629*4882a593Smuzhiyun /* The firmware interface is, um, interesting, in that the
630*4882a593Smuzhiyun * actual firmware image on the chip is little endian, thus,
631*4882a593Smuzhiyun * the process of taking that image to the CPU would end up
632*4882a593Smuzhiyun * little endian. However, the firmware interface requires it
633*4882a593Smuzhiyun * to be read a word (two bytes) at a time.
634*4882a593Smuzhiyun *
635*4882a593Smuzhiyun * The net result of this would be that the word (and
636*4882a593Smuzhiyun * doubleword) quantites in the firmware would be correct, but
637*4882a593Smuzhiyun * the bytes would be pairwise reversed. Since most of the
638*4882a593Smuzhiyun * firmware quantites are, in fact, bytes, we do an extra
639*4882a593Smuzhiyun * le16_to_cpu() in the firmware read routine.
640*4882a593Smuzhiyun *
641*4882a593Smuzhiyun * The upshot of all this is that the bytes in the firmware
642*4882a593Smuzhiyun * are in the correct places, but the 16 and 32 bit quantites
643*4882a593Smuzhiyun * are still in little endian format. We fix that up below by
644*4882a593Smuzhiyun * doing extra reverses on them */
645*4882a593Smuzhiyun nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
646*4882a593Smuzhiyun nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
647*4882a593Smuzhiyun for(i = 0; i < MAX_BUSES; i++) {
648*4882a593Smuzhiyun nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
649*4882a593Smuzhiyun nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
652*4882a593Smuzhiyun LEAVE("qla1280_read_nvram");
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun return chksum;
655*4882a593Smuzhiyun }
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun /**************************************************************************
658*4882a593Smuzhiyun * qla1280_info
659*4882a593Smuzhiyun * Return a string describing the driver.
660*4882a593Smuzhiyun **************************************************************************/
661*4882a593Smuzhiyun static const char *
qla1280_info(struct Scsi_Host * host)662*4882a593Smuzhiyun qla1280_info(struct Scsi_Host *host)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun static char qla1280_scsi_name_buffer[125];
665*4882a593Smuzhiyun char *bp;
666*4882a593Smuzhiyun struct scsi_qla_host *ha;
667*4882a593Smuzhiyun struct qla_boards *bdp;
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun bp = &qla1280_scsi_name_buffer[0];
670*4882a593Smuzhiyun ha = (struct scsi_qla_host *)host->hostdata;
671*4882a593Smuzhiyun bdp = &ql1280_board_tbl[ha->devnum];
672*4882a593Smuzhiyun memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun sprintf (bp,
675*4882a593Smuzhiyun "QLogic %s PCI to SCSI Host Adapter\n"
676*4882a593Smuzhiyun " Firmware version: %2d.%02d.%02d, Driver version %s",
677*4882a593Smuzhiyun &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
678*4882a593Smuzhiyun QLA1280_VERSION);
679*4882a593Smuzhiyun return bp;
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_queuecommand
 *   Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling).   Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	/* The srb overlays cmd->SCp (see CMD_SP). */
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	/* Mid-layer completion callback. */
	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	/* CMD_HANDLE is cmd->host_scribble; start with no handle. */
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
	 */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}
722*4882a593Smuzhiyun
/* Generates qla1280_queuecommand(), the locked wrapper the SCSI
 * mid-layer calls, around qla1280_queuecommand_lck() above. */
static DEF_SCSI_QCMD(qla1280_queuecommand)

/* Error-recovery actions handled by qla1280_error_action(). */
enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};
731*4882a593Smuzhiyun
732*4882a593Smuzhiyun
qla1280_mailbox_timeout(struct timer_list * t)733*4882a593Smuzhiyun static void qla1280_mailbox_timeout(struct timer_list *t)
734*4882a593Smuzhiyun {
735*4882a593Smuzhiyun struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
736*4882a593Smuzhiyun struct device_reg __iomem *reg;
737*4882a593Smuzhiyun reg = ha->iobase;
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun ha->mailbox_out[0] = RD_REG_WORD(®->mailbox0);
740*4882a593Smuzhiyun printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
741*4882a593Smuzhiyun "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
742*4882a593Smuzhiyun RD_REG_WORD(®->ictrl), RD_REG_WORD(®->istatus));
743*4882a593Smuzhiyun complete(ha->mailbox_wait);
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun
/*
 * _qla1280_wait_for_single_command
 *	Wait (up to 4 seconds) for the firmware to return one command.
 *	Called with the host lock held; the lock is dropped while
 *	sleeping so the interrupt handler can complete the command.
 *
 * Returns SUCCESS if the command completed, otherwise FAILED.
 */
static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;
	/* COMPLETED_HANDLE in host_scribble marks a returned command. */
	if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);
	}
	return status;
}
763*4882a593Smuzhiyun
/*
 * qla1280_wait_for_single_command
 *	Convenience wrapper: attach an on-stack completion to the srb
 *	and wait for the firmware to return the command.
 */
static int
qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	sp->wait = &wait;
	return _qla1280_wait_for_single_command(ha, sp, &wait);
}
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun static int
qla1280_wait_for_pending_commands(struct scsi_qla_host * ha,int bus,int target)774*4882a593Smuzhiyun qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
775*4882a593Smuzhiyun {
776*4882a593Smuzhiyun int cnt;
777*4882a593Smuzhiyun int status;
778*4882a593Smuzhiyun struct srb *sp;
779*4882a593Smuzhiyun struct scsi_cmnd *cmd;
780*4882a593Smuzhiyun
781*4882a593Smuzhiyun status = SUCCESS;
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun /*
784*4882a593Smuzhiyun * Wait for all commands with the designated bus/target
785*4882a593Smuzhiyun * to be completed by the firmware
786*4882a593Smuzhiyun */
787*4882a593Smuzhiyun for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
788*4882a593Smuzhiyun sp = ha->outstanding_cmds[cnt];
789*4882a593Smuzhiyun if (sp) {
790*4882a593Smuzhiyun cmd = sp->cmd;
791*4882a593Smuzhiyun
792*4882a593Smuzhiyun if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
793*4882a593Smuzhiyun continue;
794*4882a593Smuzhiyun if (target >= 0 && SCSI_TCN_32(cmd) != target)
795*4882a593Smuzhiyun continue;
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun status = qla1280_wait_for_single_command(ha, sp);
798*4882a593Smuzhiyun if (status == FAILED)
799*4882a593Smuzhiyun break;
800*4882a593Smuzhiyun }
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun return status;
803*4882a593Smuzhiyun }
804*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_error_action
 *    The function will attempt to perform a specified error action and
 *    wait for the results (or time out).
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            bus reset.
 *      action = error action to take (see enum action)
 *
 * Returns:
 *      SUCCESS or FAILED
 *
 **************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	int i, found;
	int result=FAILED;
	/* Bus/target to wait on after the action; -1 means "don't wait". */
	int wait_for_bus=-1;
	int wait_for_target = -1;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_error_action");

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
	sp = (struct srb *)CMD_SP(cmd);
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	/*
	 * Check to see if we have the command in the outstanding_cmds[]
	 * array.  If not then it must have completed before this error
	 * action was initiated.  If the error_action isn't ABORT_COMMAND
	 * then the driver must proceed with the requested action.
	 */
	found = -1;
	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if (sp == ha->outstanding_cmds[i]) {
			found = i;
			sp->wait = &wait; /* we'll wait for it to complete */
			break;
		}
	}

	if (found < 0) {	/* driver doesn't have command */
		result = SUCCESS;
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): specified command has "
			       "already completed.\n", ha->host_no, bus,
				target, lun);
		}
	}

	switch (action) {

	case ABORT_COMMAND:
		dprintk(1, "qla1280: RISC aborting command\n");
		/*
		 * The abort might fail due to race when the host_lock
		 * is released to issue the abort.  As such, we
		 * don't bother to check the return status.
		 */
		if (found >= 0)
			qla1280_abort_command(ha, sp, found);
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0) {
			/* issued device reset, set wait conditions */
			wait_for_bus = bus;
			wait_for_target = target;
		}
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
			       "reset.\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0) {
			/* issued bus reset, set wait conditions */
			wait_for_bus = bus;
		}
		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;

		if (qla1280_abort_isp(ha) != 0) {	/* it's dead */
			result = FAILED;
		}

		ha->flags.reset_active = 0;
	}

	/*
	 * At this point, the host_lock has been released and retaken
	 * by the issuance of the mailbox command.
	 * Wait for the command passed in by the mid-layer if it
	 * was found by the driver.  It might have been returned
	 * between eh recovery steps, hence the check of the "found"
	 * variable.
	 */

	if (found >= 0)
		result = _qla1280_wait_for_single_command(ha, sp, &wait);

	if (action == ABORT_COMMAND && result != SUCCESS) {
		printk(KERN_WARNING
		       "scsi(%li:%i:%i:%i): "
		       "Unable to abort command!\n",
		       ha->host_no, bus, target, lun);
	}

	/*
	 * If the command passed in by the mid-layer has been
	 * returned by the board, then wait for any additional
	 * commands which are supposed to complete based upon
	 * the error action.
	 *
	 * All commands are unconditionally returned during a
	 * call to qla1280_abort_isp(), ADAPTER_RESET.  No need
	 * to wait for them.
	 */
	if (result == SUCCESS && wait_for_bus >= 0) {
		result = qla1280_wait_for_pending_commands(ha,
			wait_for_bus, wait_for_target);
	}

	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}
968*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_abort
 *    Abort the specified SCSI command(s).
 *
 *    SCSI EH entry point; takes the host lock around the error action.
 **************************************************************************/
static int
qla1280_eh_abort(struct scsi_cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ABORT_COMMAND);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
984*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_device_reset
 *    Reset the specified SCSI device
 *
 *    SCSI EH entry point; takes the host lock around the error action.
 **************************************************************************/
static int
qla1280_eh_device_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, DEVICE_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
1000*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_bus_reset
 *    Reset the specified bus.
 *
 *    SCSI EH entry point; takes the host lock around the error action.
 **************************************************************************/
static int
qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, BUS_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
1016*4882a593Smuzhiyun
/**************************************************************************
 * qla1280_adapter_reset
 *    Reset the specified adapter (both channels)
 *
 *    SCSI EH entry point; takes the host lock around the error action.
 **************************************************************************/
static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ADAPTER_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun static int
qla1280_biosparam(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int geom[])1034*4882a593Smuzhiyun qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1035*4882a593Smuzhiyun sector_t capacity, int geom[])
1036*4882a593Smuzhiyun {
1037*4882a593Smuzhiyun int heads, sectors, cylinders;
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun heads = 64;
1040*4882a593Smuzhiyun sectors = 32;
1041*4882a593Smuzhiyun cylinders = (unsigned long)capacity / (heads * sectors);
1042*4882a593Smuzhiyun if (cylinders > 1024) {
1043*4882a593Smuzhiyun heads = 255;
1044*4882a593Smuzhiyun sectors = 63;
1045*4882a593Smuzhiyun cylinders = (unsigned long)capacity / (heads * sectors);
1046*4882a593Smuzhiyun /* if (cylinders > 1023)
1047*4882a593Smuzhiyun cylinders = 1023; */
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun geom[0] = heads;
1051*4882a593Smuzhiyun geom[1] = sectors;
1052*4882a593Smuzhiyun geom[2] = cylinders;
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun return 0;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun
/* disable risc and host interrupts */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	/* Read back to flush the posted PCI write to the chip. */
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}
1065*4882a593Smuzhiyun
/* enable risc and host interrupts */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	/* Read back to flush the posted PCI write to the chip. */
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun /**************************************************************************
1075*4882a593Smuzhiyun * qla1280_intr_handler
1076*4882a593Smuzhiyun * Handles the H/W interrupt
1077*4882a593Smuzhiyun **************************************************************************/
1078*4882a593Smuzhiyun static irqreturn_t
qla1280_intr_handler(int irq,void * dev_id)1079*4882a593Smuzhiyun qla1280_intr_handler(int irq, void *dev_id)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun struct scsi_qla_host *ha;
1082*4882a593Smuzhiyun struct device_reg __iomem *reg;
1083*4882a593Smuzhiyun u16 data;
1084*4882a593Smuzhiyun int handled = 0;
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun ENTER_INTR ("qla1280_intr_handler");
1087*4882a593Smuzhiyun ha = (struct scsi_qla_host *)dev_id;
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun spin_lock(ha->host->host_lock);
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun ha->isr_count++;
1092*4882a593Smuzhiyun reg = ha->iobase;
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun qla1280_disable_intrs(ha);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun data = qla1280_debounce_register(®->istatus);
1097*4882a593Smuzhiyun /* Check for pending interrupts. */
1098*4882a593Smuzhiyun if (data & RISC_INT) {
1099*4882a593Smuzhiyun qla1280_isr(ha, &ha->done_q);
1100*4882a593Smuzhiyun handled = 1;
1101*4882a593Smuzhiyun }
1102*4882a593Smuzhiyun if (!list_empty(&ha->done_q))
1103*4882a593Smuzhiyun qla1280_done(ha);
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun spin_unlock(ha->host->host_lock);
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun qla1280_enable_intrs(ha);
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun LEAVE_INTR("qla1280_intr_handler");
1110*4882a593Smuzhiyun return IRQ_RETVAL(handled);
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun
/*
 * qla1280_set_target_parameters
 *	Push the NVRAM-configured transfer settings for one bus/target
 *	to the firmware: a Set Target Parameters mailbox command with
 *	the per-target feature bits, then a Set Device Queue command
 *	for every LUN.
 *
 * Returns 0 on success, non-zero if any mailbox command failed.
 */
static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	/* Mailbox registers 0-3 participate by default. */
	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	/* Set Target Parameters. */
	mb[0] = MBC_SET_TARGET_PARAMETERS;
	/* Target id in the high byte; BIT_7 selects the second bus. */
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
	/* Pack the per-target feature flags into bits 8-15 of mb[2]. */
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		/* Ultra3 parts add PPR options in mb[6] (include via BIT_6). */
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] =	(nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	/* Sync period goes in the low byte of mb[3]. */
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Set Device Queue Parameters. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun /**************************************************************************
1170*4882a593Smuzhiyun * qla1280_slave_configure
1171*4882a593Smuzhiyun *
1172*4882a593Smuzhiyun * Description:
 * Determines the queue depth for a given device.  There are two ways
 * a queue depth can be obtained for a tagged queueing device: if the
 * target's tag bit is enabled in the per-bus settings, the bus's
 * high-water mark is used as the queue depth; otherwise a small
 * fixed default depth is used.
1179*4882a593Smuzhiyun **************************************************************************/
1180*4882a593Smuzhiyun static int
qla1280_slave_configure(struct scsi_device * device)1181*4882a593Smuzhiyun qla1280_slave_configure(struct scsi_device *device)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun struct scsi_qla_host *ha;
1184*4882a593Smuzhiyun int default_depth = 3;
1185*4882a593Smuzhiyun int bus = device->channel;
1186*4882a593Smuzhiyun int target = device->id;
1187*4882a593Smuzhiyun int status = 0;
1188*4882a593Smuzhiyun struct nvram *nv;
1189*4882a593Smuzhiyun unsigned long flags;
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun ha = (struct scsi_qla_host *)device->host->hostdata;
1192*4882a593Smuzhiyun nv = &ha->nvram;
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun if (qla1280_check_for_dead_scsi_bus(ha, bus))
1195*4882a593Smuzhiyun return 1;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun if (device->tagged_supported &&
1198*4882a593Smuzhiyun (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1199*4882a593Smuzhiyun scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1200*4882a593Smuzhiyun } else {
1201*4882a593Smuzhiyun scsi_change_queue_depth(device, default_depth);
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1205*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1206*4882a593Smuzhiyun nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun if (driver_setup.no_sync ||
1209*4882a593Smuzhiyun (driver_setup.sync_mask &&
1210*4882a593Smuzhiyun (~driver_setup.sync_mask & (1 << target))))
1211*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_sync = 0;
1212*4882a593Smuzhiyun if (driver_setup.no_wide ||
1213*4882a593Smuzhiyun (driver_setup.wide_mask &&
1214*4882a593Smuzhiyun (~driver_setup.wide_mask & (1 << target))))
1215*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_wide = 0;
1216*4882a593Smuzhiyun if (IS_ISP1x160(ha)) {
1217*4882a593Smuzhiyun if (driver_setup.no_ppr ||
1218*4882a593Smuzhiyun (driver_setup.ppr_mask &&
1219*4882a593Smuzhiyun (~driver_setup.ppr_mask & (1 << target))))
1220*4882a593Smuzhiyun nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun spin_lock_irqsave(ha->host->host_lock, flags);
1224*4882a593Smuzhiyun if (nv->bus[bus].target[target].parameter.enable_sync)
1225*4882a593Smuzhiyun status = qla1280_set_target_parameters(ha, bus, target);
1226*4882a593Smuzhiyun qla1280_get_target_parameters(ha, device);
1227*4882a593Smuzhiyun spin_unlock_irqrestore(ha->host->host_lock, flags);
1228*4882a593Smuzhiyun return status;
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun /*
1233*4882a593Smuzhiyun * qla1280_done
1234*4882a593Smuzhiyun * Process completed commands.
1235*4882a593Smuzhiyun *
1236*4882a593Smuzhiyun * Input:
1237*4882a593Smuzhiyun * ha = adapter block pointer.
1238*4882a593Smuzhiyun */
1239*4882a593Smuzhiyun static void
qla1280_done(struct scsi_qla_host * ha)1240*4882a593Smuzhiyun qla1280_done(struct scsi_qla_host *ha)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun struct srb *sp;
1243*4882a593Smuzhiyun struct list_head *done_q;
1244*4882a593Smuzhiyun int bus, target;
1245*4882a593Smuzhiyun struct scsi_cmnd *cmd;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun ENTER("qla1280_done");
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun done_q = &ha->done_q;
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun while (!list_empty(done_q)) {
1252*4882a593Smuzhiyun sp = list_entry(done_q->next, struct srb, list);
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun list_del(&sp->list);
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun cmd = sp->cmd;
1257*4882a593Smuzhiyun bus = SCSI_BUS_32(cmd);
1258*4882a593Smuzhiyun target = SCSI_TCN_32(cmd);
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun switch ((CMD_RESULT(cmd) >> 16)) {
1261*4882a593Smuzhiyun case DID_RESET:
1262*4882a593Smuzhiyun /* Issue marker command. */
1263*4882a593Smuzhiyun if (!ha->flags.abort_isp_active)
1264*4882a593Smuzhiyun qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1265*4882a593Smuzhiyun break;
1266*4882a593Smuzhiyun case DID_ABORT:
1267*4882a593Smuzhiyun sp->flags &= ~SRB_ABORT_PENDING;
1268*4882a593Smuzhiyun sp->flags |= SRB_ABORTED;
1269*4882a593Smuzhiyun break;
1270*4882a593Smuzhiyun default:
1271*4882a593Smuzhiyun break;
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun /* Release memory used for this I/O */
1275*4882a593Smuzhiyun scsi_dma_unmap(cmd);
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun /* Call the mid-level driver interrupt handler */
1278*4882a593Smuzhiyun ha->actthreads--;
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun if (sp->wait == NULL)
1281*4882a593Smuzhiyun (*(cmd)->scsi_done)(cmd);
1282*4882a593Smuzhiyun else
1283*4882a593Smuzhiyun complete(sp->wait);
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun LEAVE("qla1280_done");
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun /*
1289*4882a593Smuzhiyun * Translates a ISP error to a Linux SCSI error
1290*4882a593Smuzhiyun */
1291*4882a593Smuzhiyun static int
qla1280_return_status(struct response * sts,struct scsi_cmnd * cp)1292*4882a593Smuzhiyun qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun int host_status = DID_ERROR;
1295*4882a593Smuzhiyun uint16_t comp_status = le16_to_cpu(sts->comp_status);
1296*4882a593Smuzhiyun uint16_t state_flags = le16_to_cpu(sts->state_flags);
1297*4882a593Smuzhiyun uint32_t residual_length = le32_to_cpu(sts->residual_length);
1298*4882a593Smuzhiyun uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1299*4882a593Smuzhiyun #if DEBUG_QLA1280_INTR
1300*4882a593Smuzhiyun static char *reason[] = {
1301*4882a593Smuzhiyun "DID_OK",
1302*4882a593Smuzhiyun "DID_NO_CONNECT",
1303*4882a593Smuzhiyun "DID_BUS_BUSY",
1304*4882a593Smuzhiyun "DID_TIME_OUT",
1305*4882a593Smuzhiyun "DID_BAD_TARGET",
1306*4882a593Smuzhiyun "DID_ABORT",
1307*4882a593Smuzhiyun "DID_PARITY",
1308*4882a593Smuzhiyun "DID_ERROR",
1309*4882a593Smuzhiyun "DID_RESET",
1310*4882a593Smuzhiyun "DID_BAD_INTR"
1311*4882a593Smuzhiyun };
1312*4882a593Smuzhiyun #endif /* DEBUG_QLA1280_INTR */
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun ENTER("qla1280_return_status");
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun #if DEBUG_QLA1280_INTR
1317*4882a593Smuzhiyun /*
1318*4882a593Smuzhiyun dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1319*4882a593Smuzhiyun comp_status);
1320*4882a593Smuzhiyun */
1321*4882a593Smuzhiyun #endif
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun switch (comp_status) {
1324*4882a593Smuzhiyun case CS_COMPLETE:
1325*4882a593Smuzhiyun host_status = DID_OK;
1326*4882a593Smuzhiyun break;
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun case CS_INCOMPLETE:
1329*4882a593Smuzhiyun if (!(state_flags & SF_GOT_BUS))
1330*4882a593Smuzhiyun host_status = DID_NO_CONNECT;
1331*4882a593Smuzhiyun else if (!(state_flags & SF_GOT_TARGET))
1332*4882a593Smuzhiyun host_status = DID_BAD_TARGET;
1333*4882a593Smuzhiyun else if (!(state_flags & SF_SENT_CDB))
1334*4882a593Smuzhiyun host_status = DID_ERROR;
1335*4882a593Smuzhiyun else if (!(state_flags & SF_TRANSFERRED_DATA))
1336*4882a593Smuzhiyun host_status = DID_ERROR;
1337*4882a593Smuzhiyun else if (!(state_flags & SF_GOT_STATUS))
1338*4882a593Smuzhiyun host_status = DID_ERROR;
1339*4882a593Smuzhiyun else if (!(state_flags & SF_GOT_SENSE))
1340*4882a593Smuzhiyun host_status = DID_ERROR;
1341*4882a593Smuzhiyun break;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun case CS_RESET:
1344*4882a593Smuzhiyun host_status = DID_RESET;
1345*4882a593Smuzhiyun break;
1346*4882a593Smuzhiyun
1347*4882a593Smuzhiyun case CS_ABORTED:
1348*4882a593Smuzhiyun host_status = DID_ABORT;
1349*4882a593Smuzhiyun break;
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun case CS_TIMEOUT:
1352*4882a593Smuzhiyun host_status = DID_TIME_OUT;
1353*4882a593Smuzhiyun break;
1354*4882a593Smuzhiyun
1355*4882a593Smuzhiyun case CS_DATA_OVERRUN:
1356*4882a593Smuzhiyun dprintk(2, "Data overrun 0x%x\n", residual_length);
1357*4882a593Smuzhiyun dprintk(2, "qla1280_return_status: response packet data\n");
1358*4882a593Smuzhiyun qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1359*4882a593Smuzhiyun host_status = DID_ERROR;
1360*4882a593Smuzhiyun break;
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun case CS_DATA_UNDERRUN:
1363*4882a593Smuzhiyun if ((scsi_bufflen(cp) - residual_length) <
1364*4882a593Smuzhiyun cp->underflow) {
1365*4882a593Smuzhiyun printk(KERN_WARNING
1366*4882a593Smuzhiyun "scsi: Underflow detected - retrying "
1367*4882a593Smuzhiyun "command.\n");
1368*4882a593Smuzhiyun host_status = DID_ERROR;
1369*4882a593Smuzhiyun } else {
1370*4882a593Smuzhiyun scsi_set_resid(cp, residual_length);
1371*4882a593Smuzhiyun host_status = DID_OK;
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun break;
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun default:
1376*4882a593Smuzhiyun host_status = DID_ERROR;
1377*4882a593Smuzhiyun break;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun #if DEBUG_QLA1280_INTR
1381*4882a593Smuzhiyun dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1382*4882a593Smuzhiyun reason[host_status], scsi_status);
1383*4882a593Smuzhiyun #endif
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun LEAVE("qla1280_return_status");
1386*4882a593Smuzhiyun
1387*4882a593Smuzhiyun return (scsi_status & 0xff) | (host_status << 16);
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun /****************************************************************************/
1391*4882a593Smuzhiyun /* QLogic ISP1280 Hardware Support Functions. */
1392*4882a593Smuzhiyun /****************************************************************************/
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun /*
1395*4882a593Smuzhiyun * qla1280_initialize_adapter
1396*4882a593Smuzhiyun * Initialize board.
1397*4882a593Smuzhiyun *
1398*4882a593Smuzhiyun * Input:
1399*4882a593Smuzhiyun * ha = adapter block pointer.
1400*4882a593Smuzhiyun *
1401*4882a593Smuzhiyun * Returns:
1402*4882a593Smuzhiyun * 0 = success
1403*4882a593Smuzhiyun */
1404*4882a593Smuzhiyun static int
qla1280_initialize_adapter(struct scsi_qla_host * ha)1405*4882a593Smuzhiyun qla1280_initialize_adapter(struct scsi_qla_host *ha)
1406*4882a593Smuzhiyun {
1407*4882a593Smuzhiyun struct device_reg __iomem *reg;
1408*4882a593Smuzhiyun int status;
1409*4882a593Smuzhiyun int bus;
1410*4882a593Smuzhiyun unsigned long flags;
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun ENTER("qla1280_initialize_adapter");
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun /* Clear adapter flags. */
1415*4882a593Smuzhiyun ha->flags.online = 0;
1416*4882a593Smuzhiyun ha->flags.disable_host_adapter = 0;
1417*4882a593Smuzhiyun ha->flags.reset_active = 0;
1418*4882a593Smuzhiyun ha->flags.abort_isp_active = 0;
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun /* TODO: implement support for the 1040 nvram format */
1421*4882a593Smuzhiyun if (IS_ISP1040(ha))
1422*4882a593Smuzhiyun driver_setup.no_nvram = 1;
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun dprintk(1, "Configure PCI space for adapter...\n");
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun reg = ha->iobase;
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun /* Insure mailbox registers are free. */
1429*4882a593Smuzhiyun WRT_REG_WORD(®->semaphore, 0);
1430*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT);
1431*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_CLR_HOST_INT);
1432*4882a593Smuzhiyun RD_REG_WORD(®->host_cmd);
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun if (qla1280_read_nvram(ha)) {
1435*4882a593Smuzhiyun dprintk(2, "qla1280_initialize_adapter: failed to read "
1436*4882a593Smuzhiyun "NVRAM\n");
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun /*
1440*4882a593Smuzhiyun * It's necessary to grab the spin here as qla1280_mailbox_command
1441*4882a593Smuzhiyun * needs to be able to drop the lock unconditionally to wait
1442*4882a593Smuzhiyun * for completion.
1443*4882a593Smuzhiyun */
1444*4882a593Smuzhiyun spin_lock_irqsave(ha->host->host_lock, flags);
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun status = qla1280_load_firmware(ha);
1447*4882a593Smuzhiyun if (status) {
1448*4882a593Smuzhiyun printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1449*4882a593Smuzhiyun ha->host_no);
1450*4882a593Smuzhiyun goto out;
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun /* Setup adapter based on NVRAM parameters. */
1454*4882a593Smuzhiyun dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1455*4882a593Smuzhiyun qla1280_nvram_config(ha);
1456*4882a593Smuzhiyun
1457*4882a593Smuzhiyun if (ha->flags.disable_host_adapter) {
1458*4882a593Smuzhiyun status = 1;
1459*4882a593Smuzhiyun goto out;
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun status = qla1280_init_rings(ha);
1463*4882a593Smuzhiyun if (status)
1464*4882a593Smuzhiyun goto out;
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun /* Issue SCSI reset, if we can't reset twice then bus is dead */
1467*4882a593Smuzhiyun for (bus = 0; bus < ha->ports; bus++) {
1468*4882a593Smuzhiyun if (!ha->bus_settings[bus].disable_scsi_reset &&
1469*4882a593Smuzhiyun qla1280_bus_reset(ha, bus) &&
1470*4882a593Smuzhiyun qla1280_bus_reset(ha, bus))
1471*4882a593Smuzhiyun ha->bus_settings[bus].scsi_bus_dead = 1;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun ha->flags.online = 1;
1475*4882a593Smuzhiyun out:
1476*4882a593Smuzhiyun spin_unlock_irqrestore(ha->host->host_lock, flags);
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun if (status)
1479*4882a593Smuzhiyun dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun LEAVE("qla1280_initialize_adapter");
1482*4882a593Smuzhiyun return status;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun /*
1486*4882a593Smuzhiyun * qla1280_request_firmware
1487*4882a593Smuzhiyun * Acquire firmware for chip. Retain in memory
1488*4882a593Smuzhiyun * for error recovery.
1489*4882a593Smuzhiyun *
1490*4882a593Smuzhiyun * Input:
1491*4882a593Smuzhiyun * ha = adapter block pointer.
1492*4882a593Smuzhiyun *
1493*4882a593Smuzhiyun * Returns:
1494*4882a593Smuzhiyun * Pointer to firmware image or an error code
1495*4882a593Smuzhiyun * cast to pointer via ERR_PTR().
1496*4882a593Smuzhiyun */
static const struct firmware *
qla1280_request_firmware(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	int err;
	int index;
	char *fwname;

	/*
	 * request_firmware() may sleep, so the caller's host_lock must be
	 * dropped here; qla1280_firmware_mutex serializes loads and
	 * protects the qla1280_fw_tbl[] image cache.
	 */
	spin_unlock_irq(ha->host->host_lock);
	mutex_lock(&qla1280_firmware_mutex);

	/* Reuse a previously loaded image for this board type, if any. */
	index = ql1280_board_tbl[ha->devnum].fw_index;
	fw = qla1280_fw_tbl[index].fw;
	if (fw)
		goto out;

	fwname = qla1280_fw_tbl[index].fwname;
	err = request_firmware(&fw, fwname, &ha->pdev->dev);

	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		fw = ERR_PTR(err);
		goto unlock;
	}
	/* A valid image is an even number of bytes with a 6-byte header. */
	if ((fw->size % 2) || (fw->size < 6)) {
		printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		fw = ERR_PTR(-EINVAL);
		goto unlock;
	}

	/* Cache the image so it is retained for error recovery. */
	qla1280_fw_tbl[index].fw = fw;

 out:
	/* The first three bytes of the image hold the firmware version. */
	ha->fwver1 = fw->data[0];
	ha->fwver2 = fw->data[1];
	ha->fwver3 = fw->data[2];
 unlock:
	mutex_unlock(&qla1280_firmware_mutex);
	spin_lock_irq(ha->host->host_lock);
	return fw;
}
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun /*
1543*4882a593Smuzhiyun * Chip diagnostics
1544*4882a593Smuzhiyun * Test chip for proper operation.
1545*4882a593Smuzhiyun *
1546*4882a593Smuzhiyun * Input:
1547*4882a593Smuzhiyun * ha = adapter block pointer.
1548*4882a593Smuzhiyun *
1549*4882a593Smuzhiyun * Returns:
1550*4882a593Smuzhiyun * 0 = success.
1551*4882a593Smuzhiyun */
1552*4882a593Smuzhiyun static int
qla1280_chip_diag(struct scsi_qla_host * ha)1553*4882a593Smuzhiyun qla1280_chip_diag(struct scsi_qla_host *ha)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
1556*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
1557*4882a593Smuzhiyun int status = 0;
1558*4882a593Smuzhiyun int cnt;
1559*4882a593Smuzhiyun uint16_t data;
1560*4882a593Smuzhiyun dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", ®->id_l);
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun /* Soft reset chip and wait for it to finish. */
1565*4882a593Smuzhiyun WRT_REG_WORD(®->ictrl, ISP_RESET);
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun * We can't do a traditional PCI write flush here by reading
1569*4882a593Smuzhiyun * back the register. The card will not respond once the reset
1570*4882a593Smuzhiyun * is in action and we end up with a machine check exception
1571*4882a593Smuzhiyun * instead. Nothing to do but wait and hope for the best.
1572*4882a593Smuzhiyun * A portable pci_write_flush(pdev) call would be very useful here.
1573*4882a593Smuzhiyun */
1574*4882a593Smuzhiyun udelay(20);
1575*4882a593Smuzhiyun data = qla1280_debounce_register(®->ictrl);
1576*4882a593Smuzhiyun /*
1577*4882a593Smuzhiyun * Yet another QLogic gem ;-(
1578*4882a593Smuzhiyun */
1579*4882a593Smuzhiyun for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1580*4882a593Smuzhiyun udelay(5);
1581*4882a593Smuzhiyun data = RD_REG_WORD(®->ictrl);
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun if (!cnt)
1585*4882a593Smuzhiyun goto fail;
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun /* Reset register cleared by chip reset. */
1588*4882a593Smuzhiyun dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun WRT_REG_WORD(®->cfg_1, 0);
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun /* Reset RISC and disable BIOS which
1593*4882a593Smuzhiyun allows RISC to execute out of RAM. */
1594*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_RESET_RISC |
1595*4882a593Smuzhiyun HC_RELEASE_RISC | HC_DISABLE_BIOS);
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun RD_REG_WORD(®->id_l); /* Flush PCI write */
1598*4882a593Smuzhiyun data = qla1280_debounce_register(®->mailbox0);
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun /*
1601*4882a593Smuzhiyun * I *LOVE* this code!
1602*4882a593Smuzhiyun */
1603*4882a593Smuzhiyun for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1604*4882a593Smuzhiyun udelay(5);
1605*4882a593Smuzhiyun data = RD_REG_WORD(®->mailbox0);
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun if (!cnt)
1609*4882a593Smuzhiyun goto fail;
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun /* Check product ID of chip */
1612*4882a593Smuzhiyun dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun if (RD_REG_WORD(®->mailbox1) != PROD_ID_1 ||
1615*4882a593Smuzhiyun (RD_REG_WORD(®->mailbox2) != PROD_ID_2 &&
1616*4882a593Smuzhiyun RD_REG_WORD(®->mailbox2) != PROD_ID_2a) ||
1617*4882a593Smuzhiyun RD_REG_WORD(®->mailbox3) != PROD_ID_3 ||
1618*4882a593Smuzhiyun RD_REG_WORD(®->mailbox4) != PROD_ID_4) {
1619*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Wrong product ID = "
1620*4882a593Smuzhiyun "0x%x,0x%x,0x%x,0x%x\n",
1621*4882a593Smuzhiyun RD_REG_WORD(®->mailbox1),
1622*4882a593Smuzhiyun RD_REG_WORD(®->mailbox2),
1623*4882a593Smuzhiyun RD_REG_WORD(®->mailbox3),
1624*4882a593Smuzhiyun RD_REG_WORD(®->mailbox4));
1625*4882a593Smuzhiyun goto fail;
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun /*
1629*4882a593Smuzhiyun * Enable ints early!!!
1630*4882a593Smuzhiyun */
1631*4882a593Smuzhiyun qla1280_enable_intrs(ha);
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1634*4882a593Smuzhiyun /* Wrap Incoming Mailboxes Test. */
1635*4882a593Smuzhiyun mb[0] = MBC_MAILBOX_REGISTER_TEST;
1636*4882a593Smuzhiyun mb[1] = 0xAAAA;
1637*4882a593Smuzhiyun mb[2] = 0x5555;
1638*4882a593Smuzhiyun mb[3] = 0xAA55;
1639*4882a593Smuzhiyun mb[4] = 0x55AA;
1640*4882a593Smuzhiyun mb[5] = 0xA5A5;
1641*4882a593Smuzhiyun mb[6] = 0x5A5A;
1642*4882a593Smuzhiyun mb[7] = 0x2525;
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, 0xff, mb);
1645*4882a593Smuzhiyun if (status)
1646*4882a593Smuzhiyun goto fail;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1649*4882a593Smuzhiyun mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1650*4882a593Smuzhiyun mb[7] != 0x2525) {
1651*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Failed mbox check\n");
1652*4882a593Smuzhiyun goto fail;
1653*4882a593Smuzhiyun }
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun dprintk(3, "qla1280_chip_diag: exiting normally\n");
1656*4882a593Smuzhiyun return 0;
1657*4882a593Smuzhiyun fail:
1658*4882a593Smuzhiyun dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1659*4882a593Smuzhiyun return status;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun static int
qla1280_load_firmware_pio(struct scsi_qla_host * ha)1663*4882a593Smuzhiyun qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1664*4882a593Smuzhiyun {
1665*4882a593Smuzhiyun /* enter with host_lock acquired */
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun const struct firmware *fw;
1668*4882a593Smuzhiyun const __le16 *fw_data;
1669*4882a593Smuzhiyun uint16_t risc_address, risc_code_size;
1670*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1671*4882a593Smuzhiyun int err = 0;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun fw = qla1280_request_firmware(ha);
1674*4882a593Smuzhiyun if (IS_ERR(fw))
1675*4882a593Smuzhiyun return PTR_ERR(fw);
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun fw_data = (const __le16 *)&fw->data[0];
1678*4882a593Smuzhiyun ha->fwstart = __le16_to_cpu(fw_data[2]);
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun /* Load RISC code. */
1681*4882a593Smuzhiyun risc_address = ha->fwstart;
1682*4882a593Smuzhiyun fw_data = (const __le16 *)&fw->data[6];
1683*4882a593Smuzhiyun risc_code_size = (fw->size - 6) / 2;
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun for (i = 0; i < risc_code_size; i++) {
1686*4882a593Smuzhiyun mb[0] = MBC_WRITE_RAM_WORD;
1687*4882a593Smuzhiyun mb[1] = risc_address + i;
1688*4882a593Smuzhiyun mb[2] = __le16_to_cpu(fw_data[i]);
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1691*4882a593Smuzhiyun if (err) {
1692*4882a593Smuzhiyun printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1693*4882a593Smuzhiyun ha->host_no);
1694*4882a593Smuzhiyun break;
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun }
1697*4882a593Smuzhiyun
1698*4882a593Smuzhiyun return err;
1699*4882a593Smuzhiyun }
1700*4882a593Smuzhiyun
#ifdef QLA_64BIT_PTR
/* 64-bit DMA builds use the A64 variants of the RAM load/dump mailbox
 * commands; BIT_7/BIT_6 mark mailboxes 6/7 (upper address bits) valid. */
#define LOAD_CMD MBC_LOAD_RAM_A64_ROM
#define DUMP_CMD MBC_DUMP_RAM_A64_ROM
#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
#else
#define LOAD_CMD MBC_LOAD_RAM
#define DUMP_CMD MBC_DUMP_RAM
#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
#endif

/* Set to 1 to read back and verify each loaded firmware chunk
 * (for debug of RISC loading). */
#define DUMP_IT_BACK 0
1712*4882a593Smuzhiyun static int
qla1280_load_firmware_dma(struct scsi_qla_host * ha)1713*4882a593Smuzhiyun qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1714*4882a593Smuzhiyun {
1715*4882a593Smuzhiyun /* enter with host_lock acquired */
1716*4882a593Smuzhiyun const struct firmware *fw;
1717*4882a593Smuzhiyun const __le16 *fw_data;
1718*4882a593Smuzhiyun uint16_t risc_address, risc_code_size;
1719*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1720*4882a593Smuzhiyun int err = 0, num, i;
1721*4882a593Smuzhiyun #if DUMP_IT_BACK
1722*4882a593Smuzhiyun uint8_t *sp, *tbuf;
1723*4882a593Smuzhiyun dma_addr_t p_tbuf;
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
1726*4882a593Smuzhiyun if (!tbuf)
1727*4882a593Smuzhiyun return -ENOMEM;
1728*4882a593Smuzhiyun #endif
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun fw = qla1280_request_firmware(ha);
1731*4882a593Smuzhiyun if (IS_ERR(fw))
1732*4882a593Smuzhiyun return PTR_ERR(fw);
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun fw_data = (const __le16 *)&fw->data[0];
1735*4882a593Smuzhiyun ha->fwstart = __le16_to_cpu(fw_data[2]);
1736*4882a593Smuzhiyun
1737*4882a593Smuzhiyun /* Load RISC code. */
1738*4882a593Smuzhiyun risc_address = ha->fwstart;
1739*4882a593Smuzhiyun fw_data = (const __le16 *)&fw->data[6];
1740*4882a593Smuzhiyun risc_code_size = (fw->size - 6) / 2;
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun dprintk(1, "%s: DMA RISC code (%i) words\n",
1743*4882a593Smuzhiyun __func__, risc_code_size);
1744*4882a593Smuzhiyun
1745*4882a593Smuzhiyun num = 0;
1746*4882a593Smuzhiyun while (risc_code_size > 0) {
1747*4882a593Smuzhiyun int warn __attribute__((unused)) = 0;
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun cnt = 2000 >> 1;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun if (cnt > risc_code_size)
1752*4882a593Smuzhiyun cnt = risc_code_size;
1753*4882a593Smuzhiyun
1754*4882a593Smuzhiyun dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1755*4882a593Smuzhiyun "%d,%d(0x%x)\n",
1756*4882a593Smuzhiyun fw_data, cnt, num, risc_address);
1757*4882a593Smuzhiyun for(i = 0; i < cnt; i++)
1758*4882a593Smuzhiyun ((__le16 *)ha->request_ring)[i] = fw_data[i];
1759*4882a593Smuzhiyun
1760*4882a593Smuzhiyun mb[0] = LOAD_CMD;
1761*4882a593Smuzhiyun mb[1] = risc_address;
1762*4882a593Smuzhiyun mb[4] = cnt;
1763*4882a593Smuzhiyun mb[3] = ha->request_dma & 0xffff;
1764*4882a593Smuzhiyun mb[2] = (ha->request_dma >> 16) & 0xffff;
1765*4882a593Smuzhiyun mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1766*4882a593Smuzhiyun mb[6] = upper_32_bits(ha->request_dma) >> 16;
1767*4882a593Smuzhiyun dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1768*4882a593Smuzhiyun __func__, mb[0],
1769*4882a593Smuzhiyun (void *)(long)ha->request_dma,
1770*4882a593Smuzhiyun mb[6], mb[7], mb[2], mb[3]);
1771*4882a593Smuzhiyun err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
1772*4882a593Smuzhiyun if (err) {
1773*4882a593Smuzhiyun printk(KERN_ERR "scsi(%li): Failed to load partial "
1774*4882a593Smuzhiyun "segment of f\n", ha->host_no);
1775*4882a593Smuzhiyun goto out;
1776*4882a593Smuzhiyun }
1777*4882a593Smuzhiyun
1778*4882a593Smuzhiyun #if DUMP_IT_BACK
1779*4882a593Smuzhiyun mb[0] = DUMP_CMD;
1780*4882a593Smuzhiyun mb[1] = risc_address;
1781*4882a593Smuzhiyun mb[4] = cnt;
1782*4882a593Smuzhiyun mb[3] = p_tbuf & 0xffff;
1783*4882a593Smuzhiyun mb[2] = (p_tbuf >> 16) & 0xffff;
1784*4882a593Smuzhiyun mb[7] = upper_32_bits(p_tbuf) & 0xffff;
1785*4882a593Smuzhiyun mb[6] = upper_32_bits(p_tbuf) >> 16;
1786*4882a593Smuzhiyun
1787*4882a593Smuzhiyun err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
1788*4882a593Smuzhiyun if (err) {
1789*4882a593Smuzhiyun printk(KERN_ERR
1790*4882a593Smuzhiyun "Failed to dump partial segment of f/w\n");
1791*4882a593Smuzhiyun goto out;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun sp = (uint8_t *)ha->request_ring;
1794*4882a593Smuzhiyun for (i = 0; i < (cnt << 1); i++) {
1795*4882a593Smuzhiyun if (tbuf[i] != sp[i] && warn++ < 10) {
1796*4882a593Smuzhiyun printk(KERN_ERR "%s: FW compare error @ "
1797*4882a593Smuzhiyun "byte(0x%x) loop#=%x\n",
1798*4882a593Smuzhiyun __func__, i, num);
1799*4882a593Smuzhiyun printk(KERN_ERR "%s: FWbyte=%x "
1800*4882a593Smuzhiyun "FWfromChip=%x\n",
1801*4882a593Smuzhiyun __func__, sp[i], tbuf[i]);
1802*4882a593Smuzhiyun /*break; */
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun #endif
1806*4882a593Smuzhiyun risc_address += cnt;
1807*4882a593Smuzhiyun risc_code_size = risc_code_size - cnt;
1808*4882a593Smuzhiyun fw_data = fw_data + cnt;
1809*4882a593Smuzhiyun num++;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun
1812*4882a593Smuzhiyun out:
1813*4882a593Smuzhiyun #if DUMP_IT_BACK
1814*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
1815*4882a593Smuzhiyun #endif
1816*4882a593Smuzhiyun return err;
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun
1819*4882a593Smuzhiyun static int
qla1280_start_firmware(struct scsi_qla_host * ha)1820*4882a593Smuzhiyun qla1280_start_firmware(struct scsi_qla_host *ha)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
1823*4882a593Smuzhiyun int err;
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1826*4882a593Smuzhiyun __func__);
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun /* Verify checksum of loaded RISC code. */
1829*4882a593Smuzhiyun mb[0] = MBC_VERIFY_CHECKSUM;
1830*4882a593Smuzhiyun /* mb[1] = ql12_risc_code_addr01; */
1831*4882a593Smuzhiyun mb[1] = ha->fwstart;
1832*4882a593Smuzhiyun err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1833*4882a593Smuzhiyun if (err) {
1834*4882a593Smuzhiyun printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1835*4882a593Smuzhiyun return err;
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun /* Start firmware execution. */
1839*4882a593Smuzhiyun dprintk(1, "%s: start firmware running.\n", __func__);
1840*4882a593Smuzhiyun mb[0] = MBC_EXECUTE_FIRMWARE;
1841*4882a593Smuzhiyun mb[1] = ha->fwstart;
1842*4882a593Smuzhiyun err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1843*4882a593Smuzhiyun if (err) {
1844*4882a593Smuzhiyun printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1845*4882a593Smuzhiyun ha->host_no);
1846*4882a593Smuzhiyun }
1847*4882a593Smuzhiyun
1848*4882a593Smuzhiyun return err;
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun
/*
 * qla1280_load_firmware
 *      Run chip diagnostics, load the firmware image (PIO on ISP1040,
 *      DMA otherwise), and start it.  Called with host_lock held.
 *
 * Returns:
 *      0 on success, first non-zero error encountered otherwise.
 */
static int
qla1280_load_firmware(struct scsi_qla_host *ha)
{
	int err;

	err = qla1280_chip_diag(ha);
	if (err)
		return err;

	/* The ISP1040 cannot take the image via DMA. */
	err = IS_ISP1040(ha) ? qla1280_load_firmware_pio(ha) :
			       qla1280_load_firmware_dma(ha);
	if (err)
		return err;

	return qla1280_start_firmware(ha);
}
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun /*
1872*4882a593Smuzhiyun * Initialize rings
1873*4882a593Smuzhiyun *
1874*4882a593Smuzhiyun * Input:
1875*4882a593Smuzhiyun * ha = adapter block pointer.
1876*4882a593Smuzhiyun * ha->request_ring = request ring virtual address
1877*4882a593Smuzhiyun * ha->response_ring = response ring virtual address
1878*4882a593Smuzhiyun * ha->request_dma = request ring physical address
1879*4882a593Smuzhiyun * ha->response_dma = response ring physical address
1880*4882a593Smuzhiyun *
1881*4882a593Smuzhiyun * Returns:
1882*4882a593Smuzhiyun * 0 = success.
1883*4882a593Smuzhiyun */
1884*4882a593Smuzhiyun static int
qla1280_init_rings(struct scsi_qla_host * ha)1885*4882a593Smuzhiyun qla1280_init_rings(struct scsi_qla_host *ha)
1886*4882a593Smuzhiyun {
1887*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
1888*4882a593Smuzhiyun int status = 0;
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun ENTER("qla1280_init_rings");
1891*4882a593Smuzhiyun
1892*4882a593Smuzhiyun /* Clear outstanding commands array. */
1893*4882a593Smuzhiyun memset(ha->outstanding_cmds, 0,
1894*4882a593Smuzhiyun sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1895*4882a593Smuzhiyun
1896*4882a593Smuzhiyun /* Initialize request queue. */
1897*4882a593Smuzhiyun ha->request_ring_ptr = ha->request_ring;
1898*4882a593Smuzhiyun ha->req_ring_index = 0;
1899*4882a593Smuzhiyun ha->req_q_cnt = REQUEST_ENTRY_CNT;
1900*4882a593Smuzhiyun /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1901*4882a593Smuzhiyun mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1902*4882a593Smuzhiyun mb[1] = REQUEST_ENTRY_CNT;
1903*4882a593Smuzhiyun mb[3] = ha->request_dma & 0xffff;
1904*4882a593Smuzhiyun mb[2] = (ha->request_dma >> 16) & 0xffff;
1905*4882a593Smuzhiyun mb[4] = 0;
1906*4882a593Smuzhiyun mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1907*4882a593Smuzhiyun mb[6] = upper_32_bits(ha->request_dma) >> 16;
1908*4882a593Smuzhiyun if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1909*4882a593Smuzhiyun BIT_3 | BIT_2 | BIT_1 | BIT_0,
1910*4882a593Smuzhiyun &mb[0]))) {
1911*4882a593Smuzhiyun /* Initialize response queue. */
1912*4882a593Smuzhiyun ha->response_ring_ptr = ha->response_ring;
1913*4882a593Smuzhiyun ha->rsp_ring_index = 0;
1914*4882a593Smuzhiyun /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1915*4882a593Smuzhiyun mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1916*4882a593Smuzhiyun mb[1] = RESPONSE_ENTRY_CNT;
1917*4882a593Smuzhiyun mb[3] = ha->response_dma & 0xffff;
1918*4882a593Smuzhiyun mb[2] = (ha->response_dma >> 16) & 0xffff;
1919*4882a593Smuzhiyun mb[5] = 0;
1920*4882a593Smuzhiyun mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
1921*4882a593Smuzhiyun mb[6] = upper_32_bits(ha->response_dma) >> 16;
1922*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1923*4882a593Smuzhiyun BIT_3 | BIT_2 | BIT_1 | BIT_0,
1924*4882a593Smuzhiyun &mb[0]);
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun if (status)
1928*4882a593Smuzhiyun dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1929*4882a593Smuzhiyun
1930*4882a593Smuzhiyun LEAVE("qla1280_init_rings");
1931*4882a593Smuzhiyun return status;
1932*4882a593Smuzhiyun }
1933*4882a593Smuzhiyun
/*
 * qla1280_print_settings
 *      Dump the interesting per-bus NVRAM settings to the debug log
 *      (dprintk level 1).  Pure debug output - no side effects.
 *      Both buses are printed unconditionally; struct nvram always
 *      carries MAX_BUSES (2) bus entries.
 */
static void
qla1280_print_settings(struct nvram *nv)
{
	dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
		nv->bus[0].config_1.initiator_id);
	dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
		nv->bus[1].config_1.initiator_id);

	dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
		nv->bus[0].bus_reset_delay);
	dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
		nv->bus[1].bus_reset_delay);

	dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
	dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
	dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
	dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);

	dprintk(1, "qla1280 : async data setup time[0]=%d\n",
		nv->bus[0].config_2.async_data_setup_time);
	dprintk(1, "qla1280 : async data setup time[1]=%d\n",
		nv->bus[1].config_2.async_data_setup_time);

	dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
		nv->bus[0].config_2.req_ack_active_negation);
	dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
		nv->bus[1].config_2.req_ack_active_negation);

	dprintk(1, "qla1280 : data line active negation[0]=%d\n",
		nv->bus[0].config_2.data_line_active_negation);
	dprintk(1, "qla1280 : data line active negation[1]=%d\n",
		nv->bus[1].config_2.data_line_active_negation);

	dprintk(1, "qla1280 : disable loading risc code=%d\n",
		nv->cntr_flags_1.disable_loading_risc_code);

	dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
		nv->cntr_flags_1.enable_64bit_addressing);

	dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
		nv->bus[0].selection_timeout);
	dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
		nv->bus[1].selection_timeout);

	dprintk(1, "qla1280 : max queue depth[0]=%d\n",
		nv->bus[0].max_queue_depth);
	dprintk(1, "qla1280 : max queue depth[1]=%d\n",
		nv->bus[1].max_queue_depth);
}
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun static void
qla1280_set_target_defaults(struct scsi_qla_host * ha,int bus,int target)1985*4882a593Smuzhiyun qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
1986*4882a593Smuzhiyun {
1987*4882a593Smuzhiyun struct nvram *nv = &ha->nvram;
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
1990*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.auto_request_sense = 1;
1991*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.tag_queuing = 1;
1992*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_sync = 1;
1993*4882a593Smuzhiyun #if 1 /* Some SCSI Processors do not seem to like this */
1994*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.enable_wide = 1;
1995*4882a593Smuzhiyun #endif
1996*4882a593Smuzhiyun nv->bus[bus].target[target].execution_throttle =
1997*4882a593Smuzhiyun nv->bus[bus].max_queue_depth - 1;
1998*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.parity_checking = 1;
1999*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun if (IS_ISP1x160(ha)) {
2002*4882a593Smuzhiyun nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2003*4882a593Smuzhiyun nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2004*4882a593Smuzhiyun nv->bus[bus].target[target].sync_period = 9;
2005*4882a593Smuzhiyun nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2006*4882a593Smuzhiyun nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2007*4882a593Smuzhiyun nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2008*4882a593Smuzhiyun } else {
2009*4882a593Smuzhiyun nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2010*4882a593Smuzhiyun nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2011*4882a593Smuzhiyun nv->bus[bus].target[target].sync_period = 10;
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun
2015*4882a593Smuzhiyun static void
qla1280_set_defaults(struct scsi_qla_host * ha)2016*4882a593Smuzhiyun qla1280_set_defaults(struct scsi_qla_host *ha)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun struct nvram *nv = &ha->nvram;
2019*4882a593Smuzhiyun int bus, target;
2020*4882a593Smuzhiyun
2021*4882a593Smuzhiyun dprintk(1, "Using defaults for NVRAM: \n");
2022*4882a593Smuzhiyun memset(nv, 0, sizeof(struct nvram));
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2025*4882a593Smuzhiyun nv->firmware_feature.f.enable_fast_posting = 1;
2026*4882a593Smuzhiyun nv->firmware_feature.f.disable_synchronous_backoff = 1;
2027*4882a593Smuzhiyun nv->termination.scsi_bus_0_control = 3;
2028*4882a593Smuzhiyun nv->termination.scsi_bus_1_control = 3;
2029*4882a593Smuzhiyun nv->termination.auto_term_support = 1;
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun /*
2032*4882a593Smuzhiyun * Set default FIFO magic - What appropriate values would be here
2033*4882a593Smuzhiyun * is unknown. This is what I have found testing with 12160s.
2034*4882a593Smuzhiyun *
2035*4882a593Smuzhiyun * Now, I would love the magic decoder ring for this one, the
2036*4882a593Smuzhiyun * header file provided by QLogic seems to be bogus or incomplete
2037*4882a593Smuzhiyun * at best.
2038*4882a593Smuzhiyun */
2039*4882a593Smuzhiyun nv->isp_config.burst_enable = 1;
2040*4882a593Smuzhiyun if (IS_ISP1040(ha))
2041*4882a593Smuzhiyun nv->isp_config.fifo_threshold |= 3;
2042*4882a593Smuzhiyun else
2043*4882a593Smuzhiyun nv->isp_config.fifo_threshold |= 4;
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun if (IS_ISP1x160(ha))
2046*4882a593Smuzhiyun nv->isp_parameter = 0x01; /* fast memory enable */
2047*4882a593Smuzhiyun
2048*4882a593Smuzhiyun for (bus = 0; bus < MAX_BUSES; bus++) {
2049*4882a593Smuzhiyun nv->bus[bus].config_1.initiator_id = 7;
2050*4882a593Smuzhiyun nv->bus[bus].config_2.req_ack_active_negation = 1;
2051*4882a593Smuzhiyun nv->bus[bus].config_2.data_line_active_negation = 1;
2052*4882a593Smuzhiyun nv->bus[bus].selection_timeout = 250;
2053*4882a593Smuzhiyun nv->bus[bus].max_queue_depth = 32;
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun if (IS_ISP1040(ha)) {
2056*4882a593Smuzhiyun nv->bus[bus].bus_reset_delay = 3;
2057*4882a593Smuzhiyun nv->bus[bus].config_2.async_data_setup_time = 6;
2058*4882a593Smuzhiyun nv->bus[bus].retry_delay = 1;
2059*4882a593Smuzhiyun } else {
2060*4882a593Smuzhiyun nv->bus[bus].bus_reset_delay = 5;
2061*4882a593Smuzhiyun nv->bus[bus].config_2.async_data_setup_time = 8;
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun for (target = 0; target < MAX_TARGETS; target++)
2065*4882a593Smuzhiyun qla1280_set_target_defaults(ha, bus, target);
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun }
2068*4882a593Smuzhiyun
2069*4882a593Smuzhiyun static int
qla1280_config_target(struct scsi_qla_host * ha,int bus,int target)2070*4882a593Smuzhiyun qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun struct nvram *nv = &ha->nvram;
2073*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2074*4882a593Smuzhiyun int status, lun;
2075*4882a593Smuzhiyun uint16_t flag;
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun /* Set Target Parameters. */
2078*4882a593Smuzhiyun mb[0] = MBC_SET_TARGET_PARAMETERS;
2079*4882a593Smuzhiyun mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun /*
2082*4882a593Smuzhiyun * Do not enable sync and ppr for the initial INQUIRY run. We
2083*4882a593Smuzhiyun * enable this later if we determine the target actually
2084*4882a593Smuzhiyun * supports it.
2085*4882a593Smuzhiyun */
2086*4882a593Smuzhiyun mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2087*4882a593Smuzhiyun | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun if (IS_ISP1x160(ha))
2090*4882a593Smuzhiyun mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2091*4882a593Smuzhiyun else
2092*4882a593Smuzhiyun mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2093*4882a593Smuzhiyun mb[3] |= nv->bus[bus].target[target].sync_period;
2094*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, 0x0f, mb);
2095*4882a593Smuzhiyun
2096*4882a593Smuzhiyun /* Save Tag queuing enable flag. */
2097*4882a593Smuzhiyun flag = (BIT_0 << target);
2098*4882a593Smuzhiyun if (nv->bus[bus].target[target].parameter.tag_queuing)
2099*4882a593Smuzhiyun ha->bus_settings[bus].qtag_enables |= flag;
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun /* Save Device enable flag. */
2102*4882a593Smuzhiyun if (IS_ISP1x160(ha)) {
2103*4882a593Smuzhiyun if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2104*4882a593Smuzhiyun ha->bus_settings[bus].device_enables |= flag;
2105*4882a593Smuzhiyun ha->bus_settings[bus].lun_disables |= 0;
2106*4882a593Smuzhiyun } else {
2107*4882a593Smuzhiyun if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2108*4882a593Smuzhiyun ha->bus_settings[bus].device_enables |= flag;
2109*4882a593Smuzhiyun /* Save LUN disable flag. */
2110*4882a593Smuzhiyun if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2111*4882a593Smuzhiyun ha->bus_settings[bus].lun_disables |= flag;
2112*4882a593Smuzhiyun }
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun /* Set Device Queue Parameters. */
2115*4882a593Smuzhiyun for (lun = 0; lun < MAX_LUNS; lun++) {
2116*4882a593Smuzhiyun mb[0] = MBC_SET_DEVICE_QUEUE;
2117*4882a593Smuzhiyun mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2118*4882a593Smuzhiyun mb[1] |= lun;
2119*4882a593Smuzhiyun mb[2] = nv->bus[bus].max_queue_depth;
2120*4882a593Smuzhiyun mb[3] = nv->bus[bus].target[target].execution_throttle;
2121*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, 0x0f, mb);
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun
2124*4882a593Smuzhiyun return status;
2125*4882a593Smuzhiyun }
2126*4882a593Smuzhiyun
2127*4882a593Smuzhiyun static int
qla1280_config_bus(struct scsi_qla_host * ha,int bus)2128*4882a593Smuzhiyun qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun struct nvram *nv = &ha->nvram;
2131*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2132*4882a593Smuzhiyun int target, status;
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun /* SCSI Reset Disable. */
2135*4882a593Smuzhiyun ha->bus_settings[bus].disable_scsi_reset =
2136*4882a593Smuzhiyun nv->bus[bus].config_1.scsi_reset_disable;
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun /* Initiator ID. */
2139*4882a593Smuzhiyun ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2140*4882a593Smuzhiyun mb[0] = MBC_SET_INITIATOR_ID;
2141*4882a593Smuzhiyun mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2142*4882a593Smuzhiyun ha->bus_settings[bus].id;
2143*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2144*4882a593Smuzhiyun
2145*4882a593Smuzhiyun /* Reset Delay. */
2146*4882a593Smuzhiyun ha->bus_settings[bus].bus_reset_delay =
2147*4882a593Smuzhiyun nv->bus[bus].bus_reset_delay;
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun /* Command queue depth per device. */
2150*4882a593Smuzhiyun ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun /* Set target parameters. */
2153*4882a593Smuzhiyun for (target = 0; target < MAX_TARGETS; target++)
2154*4882a593Smuzhiyun status |= qla1280_config_target(ha, bus, target);
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun return status;
2157*4882a593Smuzhiyun }
2158*4882a593Smuzhiyun
2159*4882a593Smuzhiyun static int
qla1280_nvram_config(struct scsi_qla_host * ha)2160*4882a593Smuzhiyun qla1280_nvram_config(struct scsi_qla_host *ha)
2161*4882a593Smuzhiyun {
2162*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
2163*4882a593Smuzhiyun struct nvram *nv = &ha->nvram;
2164*4882a593Smuzhiyun int bus, target, status = 0;
2165*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2166*4882a593Smuzhiyun
2167*4882a593Smuzhiyun ENTER("qla1280_nvram_config");
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun if (ha->nvram_valid) {
2170*4882a593Smuzhiyun /* Always force AUTO sense for LINUX SCSI */
2171*4882a593Smuzhiyun for (bus = 0; bus < MAX_BUSES; bus++)
2172*4882a593Smuzhiyun for (target = 0; target < MAX_TARGETS; target++) {
2173*4882a593Smuzhiyun nv->bus[bus].target[target].parameter.
2174*4882a593Smuzhiyun auto_request_sense = 1;
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun } else {
2177*4882a593Smuzhiyun qla1280_set_defaults(ha);
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun qla1280_print_settings(nv);
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun /* Disable RISC load of firmware. */
2183*4882a593Smuzhiyun ha->flags.disable_risc_code_load =
2184*4882a593Smuzhiyun nv->cntr_flags_1.disable_loading_risc_code;
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun if (IS_ISP1040(ha)) {
2187*4882a593Smuzhiyun uint16_t hwrev, cfg1, cdma_conf;
2188*4882a593Smuzhiyun
2189*4882a593Smuzhiyun hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK;
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2192*4882a593Smuzhiyun cdma_conf = RD_REG_WORD(®->cdma_cfg);
2193*4882a593Smuzhiyun
2194*4882a593Smuzhiyun /* Busted fifo, says mjacob. */
2195*4882a593Smuzhiyun if (hwrev != ISP_CFG0_1040A)
2196*4882a593Smuzhiyun cfg1 |= nv->isp_config.fifo_threshold << 4;
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun cfg1 |= nv->isp_config.burst_enable << 2;
2199*4882a593Smuzhiyun WRT_REG_WORD(®->cfg_1, cfg1);
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun WRT_REG_WORD(®->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2202*4882a593Smuzhiyun WRT_REG_WORD(®->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
2203*4882a593Smuzhiyun } else {
2204*4882a593Smuzhiyun uint16_t cfg1, term;
2205*4882a593Smuzhiyun
2206*4882a593Smuzhiyun /* Set ISP hardware DMA burst */
2207*4882a593Smuzhiyun cfg1 = nv->isp_config.fifo_threshold << 4;
2208*4882a593Smuzhiyun cfg1 |= nv->isp_config.burst_enable << 2;
2209*4882a593Smuzhiyun /* Enable DMA arbitration on dual channel controllers */
2210*4882a593Smuzhiyun if (ha->ports > 1)
2211*4882a593Smuzhiyun cfg1 |= BIT_13;
2212*4882a593Smuzhiyun WRT_REG_WORD(®->cfg_1, cfg1);
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun /* Set SCSI termination. */
2215*4882a593Smuzhiyun WRT_REG_WORD(®->gpio_enable,
2216*4882a593Smuzhiyun BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2217*4882a593Smuzhiyun term = nv->termination.scsi_bus_1_control;
2218*4882a593Smuzhiyun term |= nv->termination.scsi_bus_0_control << 2;
2219*4882a593Smuzhiyun term |= nv->termination.auto_term_support << 7;
2220*4882a593Smuzhiyun RD_REG_WORD(®->id_l); /* Flush PCI write */
2221*4882a593Smuzhiyun WRT_REG_WORD(®->gpio_data, term);
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun RD_REG_WORD(®->id_l); /* Flush PCI write */
2224*4882a593Smuzhiyun
2225*4882a593Smuzhiyun /* ISP parameter word. */
2226*4882a593Smuzhiyun mb[0] = MBC_SET_SYSTEM_PARAMETER;
2227*4882a593Smuzhiyun mb[1] = nv->isp_parameter;
2228*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun if (IS_ISP1x40(ha)) {
2231*4882a593Smuzhiyun /* clock rate - for qla1240 and older, only */
2232*4882a593Smuzhiyun mb[0] = MBC_SET_CLOCK_RATE;
2233*4882a593Smuzhiyun mb[1] = 40;
2234*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun /* Firmware feature word. */
2238*4882a593Smuzhiyun mb[0] = MBC_SET_FIRMWARE_FEATURES;
2239*4882a593Smuzhiyun mb[1] = nv->firmware_feature.f.enable_fast_posting;
2240*4882a593Smuzhiyun mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2241*4882a593Smuzhiyun mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2242*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun /* Retry count and delay. */
2245*4882a593Smuzhiyun mb[0] = MBC_SET_RETRY_COUNT;
2246*4882a593Smuzhiyun mb[1] = nv->bus[0].retry_count;
2247*4882a593Smuzhiyun mb[2] = nv->bus[0].retry_delay;
2248*4882a593Smuzhiyun mb[6] = nv->bus[1].retry_count;
2249*4882a593Smuzhiyun mb[7] = nv->bus[1].retry_delay;
2250*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2251*4882a593Smuzhiyun BIT_1 | BIT_0, &mb[0]);
2252*4882a593Smuzhiyun
2253*4882a593Smuzhiyun /* ASYNC data setup time. */
2254*4882a593Smuzhiyun mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2255*4882a593Smuzhiyun mb[1] = nv->bus[0].config_2.async_data_setup_time;
2256*4882a593Smuzhiyun mb[2] = nv->bus[1].config_2.async_data_setup_time;
2257*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2258*4882a593Smuzhiyun
2259*4882a593Smuzhiyun /* Active negation states. */
2260*4882a593Smuzhiyun mb[0] = MBC_SET_ACTIVE_NEGATION;
2261*4882a593Smuzhiyun mb[1] = 0;
2262*4882a593Smuzhiyun if (nv->bus[0].config_2.req_ack_active_negation)
2263*4882a593Smuzhiyun mb[1] |= BIT_5;
2264*4882a593Smuzhiyun if (nv->bus[0].config_2.data_line_active_negation)
2265*4882a593Smuzhiyun mb[1] |= BIT_4;
2266*4882a593Smuzhiyun mb[2] = 0;
2267*4882a593Smuzhiyun if (nv->bus[1].config_2.req_ack_active_negation)
2268*4882a593Smuzhiyun mb[2] |= BIT_5;
2269*4882a593Smuzhiyun if (nv->bus[1].config_2.data_line_active_negation)
2270*4882a593Smuzhiyun mb[2] |= BIT_4;
2271*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyun mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2274*4882a593Smuzhiyun mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2275*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun /* thingy */
2278*4882a593Smuzhiyun mb[0] = MBC_SET_PCI_CONTROL;
2279*4882a593Smuzhiyun mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2280*4882a593Smuzhiyun mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2281*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2282*4882a593Smuzhiyun
2283*4882a593Smuzhiyun mb[0] = MBC_SET_TAG_AGE_LIMIT;
2284*4882a593Smuzhiyun mb[1] = 8;
2285*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2286*4882a593Smuzhiyun
2287*4882a593Smuzhiyun /* Selection timeout. */
2288*4882a593Smuzhiyun mb[0] = MBC_SET_SELECTION_TIMEOUT;
2289*4882a593Smuzhiyun mb[1] = nv->bus[0].selection_timeout;
2290*4882a593Smuzhiyun mb[2] = nv->bus[1].selection_timeout;
2291*4882a593Smuzhiyun status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun for (bus = 0; bus < ha->ports; bus++)
2294*4882a593Smuzhiyun status |= qla1280_config_bus(ha, bus);
2295*4882a593Smuzhiyun
2296*4882a593Smuzhiyun if (status)
2297*4882a593Smuzhiyun dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2298*4882a593Smuzhiyun
2299*4882a593Smuzhiyun LEAVE("qla1280_nvram_config");
2300*4882a593Smuzhiyun return status;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun
2303*4882a593Smuzhiyun /*
2304*4882a593Smuzhiyun * Get NVRAM data word
2305*4882a593Smuzhiyun * Calculates word position in NVRAM and calls request routine to
2306*4882a593Smuzhiyun * get the word from NVRAM.
2307*4882a593Smuzhiyun *
2308*4882a593Smuzhiyun * Input:
2309*4882a593Smuzhiyun * ha = adapter block pointer.
2310*4882a593Smuzhiyun * address = NVRAM word address.
2311*4882a593Smuzhiyun *
2312*4882a593Smuzhiyun * Returns:
2313*4882a593Smuzhiyun * data word.
2314*4882a593Smuzhiyun */
2315*4882a593Smuzhiyun static uint16_t
qla1280_get_nvram_word(struct scsi_qla_host * ha,uint32_t address)2316*4882a593Smuzhiyun qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2317*4882a593Smuzhiyun {
2318*4882a593Smuzhiyun uint32_t nv_cmd;
2319*4882a593Smuzhiyun uint16_t data;
2320*4882a593Smuzhiyun
2321*4882a593Smuzhiyun nv_cmd = address << 16;
2322*4882a593Smuzhiyun nv_cmd |= NV_READ_OP;
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2325*4882a593Smuzhiyun
2326*4882a593Smuzhiyun dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2327*4882a593Smuzhiyun "0x%x", data);
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun return data;
2330*4882a593Smuzhiyun }
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun /*
2333*4882a593Smuzhiyun * NVRAM request
2334*4882a593Smuzhiyun * Sends read command to NVRAM and gets data from NVRAM.
2335*4882a593Smuzhiyun *
2336*4882a593Smuzhiyun * Input:
2337*4882a593Smuzhiyun * ha = adapter block pointer.
2338*4882a593Smuzhiyun * nv_cmd = Bit 26 = start bit
2339*4882a593Smuzhiyun * Bit 25, 24 = opcode
2340*4882a593Smuzhiyun * Bit 23-16 = address
2341*4882a593Smuzhiyun * Bit 15-0 = write data
2342*4882a593Smuzhiyun *
2343*4882a593Smuzhiyun * Returns:
2344*4882a593Smuzhiyun * data word.
2345*4882a593Smuzhiyun */
static uint16_t
qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
{
	struct device_reg __iomem *reg = ha->iobase;
	int cnt;
	uint16_t data = 0;
	uint16_t reg_data;

	/*
	 * Send command to NVRAM.
	 *
	 * The 11 command bits (start bit, 2-bit opcode, 8-bit address)
	 * sit in bits 26:16 of nv_cmd; shift them up to bit 31 and clock
	 * them out MSB first, one bit per qla1280_nv_write() pulse.
	 */
	nv_cmd <<= 5;
	for (cnt = 0; cnt < 11; cnt++) {
		if (nv_cmd & BIT_31)
			qla1280_nv_write(ha, NV_DATA_OUT);
		else
			qla1280_nv_write(ha, 0);
		nv_cmd <<= 1;
	}

	/*
	 * Read data from NVRAM.
	 *
	 * Clock 16 bits back, MSB first: raise the clock, sample
	 * NV_DATA_IN, then drop the clock.  Each register write is
	 * flushed with a read of id_l before the mandatory delay so the
	 * NVRAM part sees clean, well-spaced edges.
	 */
	for (cnt = 0; cnt < 16; cnt++) {
		WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		NVRAM_DELAY();
		data <<= 1;
		reg_data = RD_REG_WORD(&reg->nvram);
		if (reg_data & NV_DATA_IN)
			data |= BIT_0;
		WRT_REG_WORD(&reg->nvram, NV_SELECT);
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		NVRAM_DELAY();
	}

	/* Deselect chip so the next command starts from a known state. */
	WRT_REG_WORD(&reg->nvram, NV_DESELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();

	return data;
}
2388*4882a593Smuzhiyun
/*
 * qla1280_nv_write
 *      Clock one bit into the serial NVRAM: present the data with chip
 *      select asserted, pulse the clock high, then drop it again.  The
 *      read of id_l after each write flushes the posted PCI write so
 *      the NVRAM sees well-separated edges before the delay expires.
 */
static void
qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
{
	struct device_reg __iomem *reg = ha->iobase;

	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
	WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	NVRAM_DELAY();
}
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun /*
2406*4882a593Smuzhiyun * Mailbox Command
2407*4882a593Smuzhiyun * Issue mailbox command and waits for completion.
2408*4882a593Smuzhiyun *
2409*4882a593Smuzhiyun * Input:
2410*4882a593Smuzhiyun * ha = adapter block pointer.
2411*4882a593Smuzhiyun * mr = mailbox registers to load.
2412*4882a593Smuzhiyun * mb = data pointer for mailbox registers.
2413*4882a593Smuzhiyun *
2414*4882a593Smuzhiyun * Output:
2415*4882a593Smuzhiyun * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2416*4882a593Smuzhiyun *
2417*4882a593Smuzhiyun * Returns:
2418*4882a593Smuzhiyun * 0 = success
2419*4882a593Smuzhiyun */
2420*4882a593Smuzhiyun static int
qla1280_mailbox_command(struct scsi_qla_host * ha,uint8_t mr,uint16_t * mb)2421*4882a593Smuzhiyun qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2422*4882a593Smuzhiyun {
2423*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
2424*4882a593Smuzhiyun int status = 0;
2425*4882a593Smuzhiyun int cnt;
2426*4882a593Smuzhiyun uint16_t *optr, *iptr;
2427*4882a593Smuzhiyun uint16_t __iomem *mptr;
2428*4882a593Smuzhiyun DECLARE_COMPLETION_ONSTACK(wait);
2429*4882a593Smuzhiyun
2430*4882a593Smuzhiyun ENTER("qla1280_mailbox_command");
2431*4882a593Smuzhiyun
2432*4882a593Smuzhiyun if (ha->mailbox_wait) {
2433*4882a593Smuzhiyun printk(KERN_ERR "Warning mailbox wait already in use!\n");
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun ha->mailbox_wait = &wait;
2436*4882a593Smuzhiyun
2437*4882a593Smuzhiyun /*
2438*4882a593Smuzhiyun * We really should start out by verifying that the mailbox is
2439*4882a593Smuzhiyun * available before starting sending the command data
2440*4882a593Smuzhiyun */
2441*4882a593Smuzhiyun /* Load mailbox registers. */
2442*4882a593Smuzhiyun mptr = (uint16_t __iomem *) ®->mailbox0;
2443*4882a593Smuzhiyun iptr = mb;
2444*4882a593Smuzhiyun for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2445*4882a593Smuzhiyun if (mr & BIT_0) {
2446*4882a593Smuzhiyun WRT_REG_WORD(mptr, (*iptr));
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun
2449*4882a593Smuzhiyun mr >>= 1;
2450*4882a593Smuzhiyun mptr++;
2451*4882a593Smuzhiyun iptr++;
2452*4882a593Smuzhiyun }
2453*4882a593Smuzhiyun
2454*4882a593Smuzhiyun /* Issue set host interrupt command. */
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun /* set up a timer just in case we're really jammed */
2457*4882a593Smuzhiyun timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
2458*4882a593Smuzhiyun mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun spin_unlock_irq(ha->host->host_lock);
2461*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT);
2462*4882a593Smuzhiyun qla1280_debounce_register(®->istatus);
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun wait_for_completion(&wait);
2465*4882a593Smuzhiyun del_timer_sync(&ha->mailbox_timer);
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun spin_lock_irq(ha->host->host_lock);
2468*4882a593Smuzhiyun
2469*4882a593Smuzhiyun ha->mailbox_wait = NULL;
2470*4882a593Smuzhiyun
2471*4882a593Smuzhiyun /* Check for mailbox command timeout. */
2472*4882a593Smuzhiyun if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2473*4882a593Smuzhiyun printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2474*4882a593Smuzhiyun "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2475*4882a593Smuzhiyun "0x%04x\n",
2476*4882a593Smuzhiyun mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus));
2477*4882a593Smuzhiyun printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2478*4882a593Smuzhiyun RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1),
2479*4882a593Smuzhiyun RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3));
2480*4882a593Smuzhiyun printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2481*4882a593Smuzhiyun RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5),
2482*4882a593Smuzhiyun RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7));
2483*4882a593Smuzhiyun status = 1;
2484*4882a593Smuzhiyun }
2485*4882a593Smuzhiyun
2486*4882a593Smuzhiyun /* Load return mailbox registers. */
2487*4882a593Smuzhiyun optr = mb;
2488*4882a593Smuzhiyun iptr = (uint16_t *) &ha->mailbox_out[0];
2489*4882a593Smuzhiyun mr = MAILBOX_REGISTER_COUNT;
2490*4882a593Smuzhiyun memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun if (ha->flags.reset_marker)
2493*4882a593Smuzhiyun qla1280_rst_aen(ha);
2494*4882a593Smuzhiyun
2495*4882a593Smuzhiyun if (status)
2496*4882a593Smuzhiyun dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2497*4882a593Smuzhiyun "0x%x ****\n", mb[0]);
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun LEAVE("qla1280_mailbox_command");
2500*4882a593Smuzhiyun return status;
2501*4882a593Smuzhiyun }
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun /*
2504*4882a593Smuzhiyun * qla1280_poll
2505*4882a593Smuzhiyun * Polls ISP for interrupts.
2506*4882a593Smuzhiyun *
2507*4882a593Smuzhiyun * Input:
2508*4882a593Smuzhiyun * ha = adapter block pointer.
2509*4882a593Smuzhiyun */
2510*4882a593Smuzhiyun static void
qla1280_poll(struct scsi_qla_host * ha)2511*4882a593Smuzhiyun qla1280_poll(struct scsi_qla_host *ha)
2512*4882a593Smuzhiyun {
2513*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
2514*4882a593Smuzhiyun uint16_t data;
2515*4882a593Smuzhiyun LIST_HEAD(done_q);
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun /* ENTER("qla1280_poll"); */
2518*4882a593Smuzhiyun
2519*4882a593Smuzhiyun /* Check for pending interrupts. */
2520*4882a593Smuzhiyun data = RD_REG_WORD(®->istatus);
2521*4882a593Smuzhiyun if (data & RISC_INT)
2522*4882a593Smuzhiyun qla1280_isr(ha, &done_q);
2523*4882a593Smuzhiyun
2524*4882a593Smuzhiyun if (!ha->mailbox_wait) {
2525*4882a593Smuzhiyun if (ha->flags.reset_marker)
2526*4882a593Smuzhiyun qla1280_rst_aen(ha);
2527*4882a593Smuzhiyun }
2528*4882a593Smuzhiyun
2529*4882a593Smuzhiyun if (!list_empty(&done_q))
2530*4882a593Smuzhiyun qla1280_done(ha);
2531*4882a593Smuzhiyun
2532*4882a593Smuzhiyun /* LEAVE("qla1280_poll"); */
2533*4882a593Smuzhiyun }
2534*4882a593Smuzhiyun
2535*4882a593Smuzhiyun /*
2536*4882a593Smuzhiyun * qla1280_bus_reset
2537*4882a593Smuzhiyun * Issue SCSI bus reset.
2538*4882a593Smuzhiyun *
2539*4882a593Smuzhiyun * Input:
2540*4882a593Smuzhiyun * ha = adapter block pointer.
2541*4882a593Smuzhiyun * bus = SCSI bus number.
2542*4882a593Smuzhiyun *
2543*4882a593Smuzhiyun * Returns:
2544*4882a593Smuzhiyun * 0 = success
2545*4882a593Smuzhiyun */
2546*4882a593Smuzhiyun static int
qla1280_bus_reset(struct scsi_qla_host * ha,int bus)2547*4882a593Smuzhiyun qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2548*4882a593Smuzhiyun {
2549*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2550*4882a593Smuzhiyun uint16_t reset_delay;
2551*4882a593Smuzhiyun int status;
2552*4882a593Smuzhiyun
2553*4882a593Smuzhiyun dprintk(3, "qla1280_bus_reset: entered\n");
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun if (qla1280_verbose)
2556*4882a593Smuzhiyun printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2557*4882a593Smuzhiyun ha->host_no, bus);
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun reset_delay = ha->bus_settings[bus].bus_reset_delay;
2560*4882a593Smuzhiyun mb[0] = MBC_BUS_RESET;
2561*4882a593Smuzhiyun mb[1] = reset_delay;
2562*4882a593Smuzhiyun mb[2] = (uint16_t) bus;
2563*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2564*4882a593Smuzhiyun
2565*4882a593Smuzhiyun if (status) {
2566*4882a593Smuzhiyun if (ha->bus_settings[bus].failed_reset_count > 2)
2567*4882a593Smuzhiyun ha->bus_settings[bus].scsi_bus_dead = 1;
2568*4882a593Smuzhiyun ha->bus_settings[bus].failed_reset_count++;
2569*4882a593Smuzhiyun } else {
2570*4882a593Smuzhiyun spin_unlock_irq(ha->host->host_lock);
2571*4882a593Smuzhiyun ssleep(reset_delay);
2572*4882a593Smuzhiyun spin_lock_irq(ha->host->host_lock);
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun ha->bus_settings[bus].scsi_bus_dead = 0;
2575*4882a593Smuzhiyun ha->bus_settings[bus].failed_reset_count = 0;
2576*4882a593Smuzhiyun ha->bus_settings[bus].reset_marker = 0;
2577*4882a593Smuzhiyun /* Issue marker command. */
2578*4882a593Smuzhiyun qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2579*4882a593Smuzhiyun }
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun /*
2582*4882a593Smuzhiyun * We should probably call qla1280_set_target_parameters()
2583*4882a593Smuzhiyun * here as well for all devices on the bus.
2584*4882a593Smuzhiyun */
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun if (status)
2587*4882a593Smuzhiyun dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2588*4882a593Smuzhiyun else
2589*4882a593Smuzhiyun dprintk(3, "qla1280_bus_reset: exiting normally\n");
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun return status;
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun
2594*4882a593Smuzhiyun /*
2595*4882a593Smuzhiyun * qla1280_device_reset
2596*4882a593Smuzhiyun * Issue bus device reset message to the target.
2597*4882a593Smuzhiyun *
2598*4882a593Smuzhiyun * Input:
2599*4882a593Smuzhiyun * ha = adapter block pointer.
2600*4882a593Smuzhiyun * bus = SCSI BUS number.
2601*4882a593Smuzhiyun * target = SCSI ID.
2602*4882a593Smuzhiyun *
2603*4882a593Smuzhiyun * Returns:
2604*4882a593Smuzhiyun * 0 = success
2605*4882a593Smuzhiyun */
2606*4882a593Smuzhiyun static int
qla1280_device_reset(struct scsi_qla_host * ha,int bus,int target)2607*4882a593Smuzhiyun qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2608*4882a593Smuzhiyun {
2609*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2610*4882a593Smuzhiyun int status;
2611*4882a593Smuzhiyun
2612*4882a593Smuzhiyun ENTER("qla1280_device_reset");
2613*4882a593Smuzhiyun
2614*4882a593Smuzhiyun mb[0] = MBC_ABORT_TARGET;
2615*4882a593Smuzhiyun mb[1] = (bus ? (target | BIT_7) : target) << 8;
2616*4882a593Smuzhiyun mb[2] = 1;
2617*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2618*4882a593Smuzhiyun
2619*4882a593Smuzhiyun /* Issue marker command. */
2620*4882a593Smuzhiyun qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2621*4882a593Smuzhiyun
2622*4882a593Smuzhiyun if (status)
2623*4882a593Smuzhiyun dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2624*4882a593Smuzhiyun
2625*4882a593Smuzhiyun LEAVE("qla1280_device_reset");
2626*4882a593Smuzhiyun return status;
2627*4882a593Smuzhiyun }
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun /*
2630*4882a593Smuzhiyun * qla1280_abort_command
2631*4882a593Smuzhiyun * Abort command aborts a specified IOCB.
2632*4882a593Smuzhiyun *
2633*4882a593Smuzhiyun * Input:
2634*4882a593Smuzhiyun * ha = adapter block pointer.
2635*4882a593Smuzhiyun * sp = SB structure pointer.
2636*4882a593Smuzhiyun *
2637*4882a593Smuzhiyun * Returns:
2638*4882a593Smuzhiyun * 0 = success
2639*4882a593Smuzhiyun */
2640*4882a593Smuzhiyun static int
qla1280_abort_command(struct scsi_qla_host * ha,struct srb * sp,int handle)2641*4882a593Smuzhiyun qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2642*4882a593Smuzhiyun {
2643*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
2644*4882a593Smuzhiyun unsigned int bus, target, lun;
2645*4882a593Smuzhiyun int status;
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun ENTER("qla1280_abort_command");
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun bus = SCSI_BUS_32(sp->cmd);
2650*4882a593Smuzhiyun target = SCSI_TCN_32(sp->cmd);
2651*4882a593Smuzhiyun lun = SCSI_LUN_32(sp->cmd);
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun sp->flags |= SRB_ABORT_PENDING;
2654*4882a593Smuzhiyun
2655*4882a593Smuzhiyun mb[0] = MBC_ABORT_COMMAND;
2656*4882a593Smuzhiyun mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2657*4882a593Smuzhiyun mb[2] = handle >> 16;
2658*4882a593Smuzhiyun mb[3] = handle & 0xffff;
2659*4882a593Smuzhiyun status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2660*4882a593Smuzhiyun
2661*4882a593Smuzhiyun if (status) {
2662*4882a593Smuzhiyun dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2663*4882a593Smuzhiyun sp->flags &= ~SRB_ABORT_PENDING;
2664*4882a593Smuzhiyun }
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun
2667*4882a593Smuzhiyun LEAVE("qla1280_abort_command");
2668*4882a593Smuzhiyun return status;
2669*4882a593Smuzhiyun }
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun /*
2672*4882a593Smuzhiyun * qla1280_reset_adapter
2673*4882a593Smuzhiyun * Reset adapter.
2674*4882a593Smuzhiyun *
2675*4882a593Smuzhiyun * Input:
2676*4882a593Smuzhiyun * ha = adapter block pointer.
2677*4882a593Smuzhiyun */
2678*4882a593Smuzhiyun static void
qla1280_reset_adapter(struct scsi_qla_host * ha)2679*4882a593Smuzhiyun qla1280_reset_adapter(struct scsi_qla_host *ha)
2680*4882a593Smuzhiyun {
2681*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
2682*4882a593Smuzhiyun
2683*4882a593Smuzhiyun ENTER("qla1280_reset_adapter");
2684*4882a593Smuzhiyun
2685*4882a593Smuzhiyun /* Disable ISP chip */
2686*4882a593Smuzhiyun ha->flags.online = 0;
2687*4882a593Smuzhiyun WRT_REG_WORD(®->ictrl, ISP_RESET);
2688*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd,
2689*4882a593Smuzhiyun HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2690*4882a593Smuzhiyun RD_REG_WORD(®->id_l); /* Flush PCI write */
2691*4882a593Smuzhiyun
2692*4882a593Smuzhiyun LEAVE("qla1280_reset_adapter");
2693*4882a593Smuzhiyun }
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun /*
2696*4882a593Smuzhiyun * Issue marker command.
2697*4882a593Smuzhiyun * Function issues marker IOCB.
2698*4882a593Smuzhiyun *
2699*4882a593Smuzhiyun * Input:
2700*4882a593Smuzhiyun * ha = adapter block pointer.
2701*4882a593Smuzhiyun * bus = SCSI BUS number
2702*4882a593Smuzhiyun * id = SCSI ID
2703*4882a593Smuzhiyun * lun = SCSI LUN
2704*4882a593Smuzhiyun * type = marker modifier
2705*4882a593Smuzhiyun */
2706*4882a593Smuzhiyun static void
qla1280_marker(struct scsi_qla_host * ha,int bus,int id,int lun,u8 type)2707*4882a593Smuzhiyun qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2708*4882a593Smuzhiyun {
2709*4882a593Smuzhiyun struct mrk_entry *pkt;
2710*4882a593Smuzhiyun
2711*4882a593Smuzhiyun ENTER("qla1280_marker");
2712*4882a593Smuzhiyun
2713*4882a593Smuzhiyun /* Get request packet. */
2714*4882a593Smuzhiyun if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2715*4882a593Smuzhiyun pkt->entry_type = MARKER_TYPE;
2716*4882a593Smuzhiyun pkt->lun = (uint8_t) lun;
2717*4882a593Smuzhiyun pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2718*4882a593Smuzhiyun pkt->modifier = type;
2719*4882a593Smuzhiyun pkt->entry_status = 0;
2720*4882a593Smuzhiyun
2721*4882a593Smuzhiyun /* Issue command to ISP */
2722*4882a593Smuzhiyun qla1280_isp_cmd(ha);
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun LEAVE("qla1280_marker");
2726*4882a593Smuzhiyun }
2727*4882a593Smuzhiyun
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun /*
2730*4882a593Smuzhiyun * qla1280_64bit_start_scsi
2731*4882a593Smuzhiyun * The start SCSI is responsible for building request packets on
2732*4882a593Smuzhiyun * request ring and modifying ISP input pointer.
2733*4882a593Smuzhiyun *
2734*4882a593Smuzhiyun * Input:
2735*4882a593Smuzhiyun * ha = adapter block pointer.
2736*4882a593Smuzhiyun * sp = SB structure pointer.
2737*4882a593Smuzhiyun *
2738*4882a593Smuzhiyun * Returns:
2739*4882a593Smuzhiyun * 0 = success, was able to issue command.
2740*4882a593Smuzhiyun */
2741*4882a593Smuzhiyun #ifdef QLA_64BIT_PTR
2742*4882a593Smuzhiyun static int
qla1280_64bit_start_scsi(struct scsi_qla_host * ha,struct srb * sp)2743*4882a593Smuzhiyun qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2744*4882a593Smuzhiyun {
2745*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
2746*4882a593Smuzhiyun struct scsi_cmnd *cmd = sp->cmd;
2747*4882a593Smuzhiyun cmd_a64_entry_t *pkt;
2748*4882a593Smuzhiyun __le32 *dword_ptr;
2749*4882a593Smuzhiyun dma_addr_t dma_handle;
2750*4882a593Smuzhiyun int status = 0;
2751*4882a593Smuzhiyun int cnt;
2752*4882a593Smuzhiyun int req_cnt;
2753*4882a593Smuzhiyun int seg_cnt;
2754*4882a593Smuzhiyun u8 dir;
2755*4882a593Smuzhiyun
2756*4882a593Smuzhiyun ENTER("qla1280_64bit_start_scsi:");
2757*4882a593Smuzhiyun
2758*4882a593Smuzhiyun /* Calculate number of entries and segments required. */
2759*4882a593Smuzhiyun req_cnt = 1;
2760*4882a593Smuzhiyun seg_cnt = scsi_dma_map(cmd);
2761*4882a593Smuzhiyun if (seg_cnt > 0) {
2762*4882a593Smuzhiyun if (seg_cnt > 2) {
2763*4882a593Smuzhiyun req_cnt += (seg_cnt - 2) / 5;
2764*4882a593Smuzhiyun if ((seg_cnt - 2) % 5)
2765*4882a593Smuzhiyun req_cnt++;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun } else if (seg_cnt < 0) {
2768*4882a593Smuzhiyun status = 1;
2769*4882a593Smuzhiyun goto out;
2770*4882a593Smuzhiyun }
2771*4882a593Smuzhiyun
2772*4882a593Smuzhiyun if ((req_cnt + 2) >= ha->req_q_cnt) {
2773*4882a593Smuzhiyun /* Calculate number of free request entries. */
2774*4882a593Smuzhiyun cnt = RD_REG_WORD(®->mailbox4);
2775*4882a593Smuzhiyun if (ha->req_ring_index < cnt)
2776*4882a593Smuzhiyun ha->req_q_cnt = cnt - ha->req_ring_index;
2777*4882a593Smuzhiyun else
2778*4882a593Smuzhiyun ha->req_q_cnt =
2779*4882a593Smuzhiyun REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2780*4882a593Smuzhiyun }
2781*4882a593Smuzhiyun
2782*4882a593Smuzhiyun dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2783*4882a593Smuzhiyun ha->req_q_cnt, seg_cnt);
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun /* If room for request in request ring. */
2786*4882a593Smuzhiyun if ((req_cnt + 2) >= ha->req_q_cnt) {
2787*4882a593Smuzhiyun status = SCSI_MLQUEUE_HOST_BUSY;
2788*4882a593Smuzhiyun dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2789*4882a593Smuzhiyun "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2790*4882a593Smuzhiyun req_cnt);
2791*4882a593Smuzhiyun goto out;
2792*4882a593Smuzhiyun }
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun /* Check for room in outstanding command list. */
2795*4882a593Smuzhiyun for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2796*4882a593Smuzhiyun ha->outstanding_cmds[cnt] != NULL; cnt++);
2797*4882a593Smuzhiyun
2798*4882a593Smuzhiyun if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2799*4882a593Smuzhiyun status = SCSI_MLQUEUE_HOST_BUSY;
2800*4882a593Smuzhiyun dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2801*4882a593Smuzhiyun "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2802*4882a593Smuzhiyun goto out;
2803*4882a593Smuzhiyun }
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun ha->outstanding_cmds[cnt] = sp;
2806*4882a593Smuzhiyun ha->req_q_cnt -= req_cnt;
2807*4882a593Smuzhiyun CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2808*4882a593Smuzhiyun
2809*4882a593Smuzhiyun dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2810*4882a593Smuzhiyun cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2811*4882a593Smuzhiyun dprintk(2, " bus %i, target %i, lun %i\n",
2812*4882a593Smuzhiyun SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2813*4882a593Smuzhiyun qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2814*4882a593Smuzhiyun
2815*4882a593Smuzhiyun /*
2816*4882a593Smuzhiyun * Build command packet.
2817*4882a593Smuzhiyun */
2818*4882a593Smuzhiyun pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2819*4882a593Smuzhiyun
2820*4882a593Smuzhiyun pkt->entry_type = COMMAND_A64_TYPE;
2821*4882a593Smuzhiyun pkt->entry_count = (uint8_t) req_cnt;
2822*4882a593Smuzhiyun pkt->sys_define = (uint8_t) ha->req_ring_index;
2823*4882a593Smuzhiyun pkt->entry_status = 0;
2824*4882a593Smuzhiyun pkt->handle = cpu_to_le32(cnt);
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun /* Zero out remaining portion of packet. */
2827*4882a593Smuzhiyun memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun /* Set ISP command timeout. */
2830*4882a593Smuzhiyun pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2831*4882a593Smuzhiyun
2832*4882a593Smuzhiyun /* Set device target ID and LUN */
2833*4882a593Smuzhiyun pkt->lun = SCSI_LUN_32(cmd);
2834*4882a593Smuzhiyun pkt->target = SCSI_BUS_32(cmd) ?
2835*4882a593Smuzhiyun (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2836*4882a593Smuzhiyun
2837*4882a593Smuzhiyun /* Enable simple tag queuing if device supports it. */
2838*4882a593Smuzhiyun if (cmd->device->simple_tags)
2839*4882a593Smuzhiyun pkt->control_flags |= cpu_to_le16(BIT_3);
2840*4882a593Smuzhiyun
2841*4882a593Smuzhiyun /* Load SCSI command packet. */
2842*4882a593Smuzhiyun pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2843*4882a593Smuzhiyun memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2844*4882a593Smuzhiyun /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun /* Set transfer direction. */
2847*4882a593Smuzhiyun dir = qla1280_data_direction(cmd);
2848*4882a593Smuzhiyun pkt->control_flags |= cpu_to_le16(dir);
2849*4882a593Smuzhiyun
2850*4882a593Smuzhiyun /* Set total data segment count. */
2851*4882a593Smuzhiyun pkt->dseg_count = cpu_to_le16(seg_cnt);
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun /*
2854*4882a593Smuzhiyun * Load data segments.
2855*4882a593Smuzhiyun */
2856*4882a593Smuzhiyun if (seg_cnt) { /* If data transfer. */
2857*4882a593Smuzhiyun struct scatterlist *sg, *s;
2858*4882a593Smuzhiyun int remseg = seg_cnt;
2859*4882a593Smuzhiyun
2860*4882a593Smuzhiyun sg = scsi_sglist(cmd);
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun /* Setup packet address segment pointer. */
2863*4882a593Smuzhiyun dword_ptr = (u32 *)&pkt->dseg_0_address;
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun /* Load command entry data segments. */
2866*4882a593Smuzhiyun for_each_sg(sg, s, seg_cnt, cnt) {
2867*4882a593Smuzhiyun if (cnt == 2)
2868*4882a593Smuzhiyun break;
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun dma_handle = sg_dma_address(s);
2871*4882a593Smuzhiyun *dword_ptr++ =
2872*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(dma_handle));
2873*4882a593Smuzhiyun *dword_ptr++ =
2874*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(dma_handle));
2875*4882a593Smuzhiyun *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2876*4882a593Smuzhiyun dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2877*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(dma_handle)),
2878*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(dma_handle)),
2879*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(sg_next(s))));
2880*4882a593Smuzhiyun remseg--;
2881*4882a593Smuzhiyun }
2882*4882a593Smuzhiyun dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2883*4882a593Smuzhiyun "command packet data - b %i, t %i, l %i \n",
2884*4882a593Smuzhiyun SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2885*4882a593Smuzhiyun SCSI_LUN_32(cmd));
2886*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt,
2887*4882a593Smuzhiyun REQUEST_ENTRY_SIZE);
2888*4882a593Smuzhiyun
2889*4882a593Smuzhiyun /*
2890*4882a593Smuzhiyun * Build continuation packets.
2891*4882a593Smuzhiyun */
2892*4882a593Smuzhiyun dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2893*4882a593Smuzhiyun "remains\n", seg_cnt);
2894*4882a593Smuzhiyun
2895*4882a593Smuzhiyun while (remseg > 0) {
2896*4882a593Smuzhiyun /* Update sg start */
2897*4882a593Smuzhiyun sg = s;
2898*4882a593Smuzhiyun /* Adjust ring index. */
2899*4882a593Smuzhiyun ha->req_ring_index++;
2900*4882a593Smuzhiyun if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2901*4882a593Smuzhiyun ha->req_ring_index = 0;
2902*4882a593Smuzhiyun ha->request_ring_ptr =
2903*4882a593Smuzhiyun ha->request_ring;
2904*4882a593Smuzhiyun } else
2905*4882a593Smuzhiyun ha->request_ring_ptr++;
2906*4882a593Smuzhiyun
2907*4882a593Smuzhiyun pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2908*4882a593Smuzhiyun
2909*4882a593Smuzhiyun /* Zero out packet. */
2910*4882a593Smuzhiyun memset(pkt, 0, REQUEST_ENTRY_SIZE);
2911*4882a593Smuzhiyun
2912*4882a593Smuzhiyun /* Load packet defaults. */
2913*4882a593Smuzhiyun ((struct cont_a64_entry *) pkt)->entry_type =
2914*4882a593Smuzhiyun CONTINUE_A64_TYPE;
2915*4882a593Smuzhiyun ((struct cont_a64_entry *) pkt)->entry_count = 1;
2916*4882a593Smuzhiyun ((struct cont_a64_entry *) pkt)->sys_define =
2917*4882a593Smuzhiyun (uint8_t)ha->req_ring_index;
2918*4882a593Smuzhiyun /* Setup packet address segment pointer. */
2919*4882a593Smuzhiyun dword_ptr =
2920*4882a593Smuzhiyun (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2921*4882a593Smuzhiyun
2922*4882a593Smuzhiyun /* Load continuation entry data segments. */
2923*4882a593Smuzhiyun for_each_sg(sg, s, remseg, cnt) {
2924*4882a593Smuzhiyun if (cnt == 5)
2925*4882a593Smuzhiyun break;
2926*4882a593Smuzhiyun dma_handle = sg_dma_address(s);
2927*4882a593Smuzhiyun *dword_ptr++ =
2928*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(dma_handle));
2929*4882a593Smuzhiyun *dword_ptr++ =
2930*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(dma_handle));
2931*4882a593Smuzhiyun *dword_ptr++ =
2932*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(s));
2933*4882a593Smuzhiyun dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2934*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(dma_handle)),
2935*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(dma_handle)),
2936*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(s)));
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun remseg -= cnt;
2939*4882a593Smuzhiyun dprintk(5, "qla1280_64bit_start_scsi: "
2940*4882a593Smuzhiyun "continuation packet data - b %i, t "
2941*4882a593Smuzhiyun "%i, l %i \n", SCSI_BUS_32(cmd),
2942*4882a593Smuzhiyun SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2943*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt,
2944*4882a593Smuzhiyun REQUEST_ENTRY_SIZE);
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun } else { /* No data transfer */
2947*4882a593Smuzhiyun dprintk(5, "qla1280_64bit_start_scsi: No data, command "
2948*4882a593Smuzhiyun "packet data - b %i, t %i, l %i \n",
2949*4882a593Smuzhiyun SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2950*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
2951*4882a593Smuzhiyun }
2952*4882a593Smuzhiyun /* Adjust ring index. */
2953*4882a593Smuzhiyun ha->req_ring_index++;
2954*4882a593Smuzhiyun if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2955*4882a593Smuzhiyun ha->req_ring_index = 0;
2956*4882a593Smuzhiyun ha->request_ring_ptr = ha->request_ring;
2957*4882a593Smuzhiyun } else
2958*4882a593Smuzhiyun ha->request_ring_ptr++;
2959*4882a593Smuzhiyun
2960*4882a593Smuzhiyun /* Set chip new ring index. */
2961*4882a593Smuzhiyun dprintk(2,
2962*4882a593Smuzhiyun "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
2963*4882a593Smuzhiyun sp->flags |= SRB_SENT;
2964*4882a593Smuzhiyun ha->actthreads++;
2965*4882a593Smuzhiyun WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
2966*4882a593Smuzhiyun
2967*4882a593Smuzhiyun out:
2968*4882a593Smuzhiyun if (status)
2969*4882a593Smuzhiyun dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
2970*4882a593Smuzhiyun else
2971*4882a593Smuzhiyun dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
2972*4882a593Smuzhiyun
2973*4882a593Smuzhiyun return status;
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun #else /* !QLA_64BIT_PTR */
2976*4882a593Smuzhiyun
2977*4882a593Smuzhiyun /*
2978*4882a593Smuzhiyun * qla1280_32bit_start_scsi
2979*4882a593Smuzhiyun * The start SCSI is responsible for building request packets on
2980*4882a593Smuzhiyun * request ring and modifying ISP input pointer.
2981*4882a593Smuzhiyun *
2982*4882a593Smuzhiyun * The Qlogic firmware interface allows every queue slot to have a SCSI
2983*4882a593Smuzhiyun * command and up to 4 scatter/gather (SG) entries. If we need more
2984*4882a593Smuzhiyun * than 4 SG entries, then continuation entries are used that can
2985*4882a593Smuzhiyun * hold another 7 entries each. The start routine determines if there
2986*4882a593Smuzhiyun  * are enough empty slots, then builds the combination of requests to
2987*4882a593Smuzhiyun * fulfill the OS request.
2988*4882a593Smuzhiyun *
2989*4882a593Smuzhiyun * Input:
2990*4882a593Smuzhiyun * ha = adapter block pointer.
2991*4882a593Smuzhiyun * sp = SCSI Request Block structure pointer.
2992*4882a593Smuzhiyun *
2993*4882a593Smuzhiyun * Returns:
2994*4882a593Smuzhiyun * 0 = success, was able to issue command.
2995*4882a593Smuzhiyun */
2996*4882a593Smuzhiyun static int
qla1280_32bit_start_scsi(struct scsi_qla_host * ha,struct srb * sp)2997*4882a593Smuzhiyun qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2998*4882a593Smuzhiyun {
2999*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3000*4882a593Smuzhiyun struct scsi_cmnd *cmd = sp->cmd;
3001*4882a593Smuzhiyun struct cmd_entry *pkt;
3002*4882a593Smuzhiyun __le32 *dword_ptr;
3003*4882a593Smuzhiyun int status = 0;
3004*4882a593Smuzhiyun int cnt;
3005*4882a593Smuzhiyun int req_cnt;
3006*4882a593Smuzhiyun int seg_cnt;
3007*4882a593Smuzhiyun u8 dir;
3008*4882a593Smuzhiyun
3009*4882a593Smuzhiyun ENTER("qla1280_32bit_start_scsi");
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3012*4882a593Smuzhiyun cmd->cmnd[0]);
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun /* Calculate number of entries and segments required. */
3015*4882a593Smuzhiyun req_cnt = 1;
3016*4882a593Smuzhiyun seg_cnt = scsi_dma_map(cmd);
3017*4882a593Smuzhiyun if (seg_cnt) {
3018*4882a593Smuzhiyun /*
3019*4882a593Smuzhiyun * if greater than four sg entries then we need to allocate
3020*4882a593Smuzhiyun * continuation entries
3021*4882a593Smuzhiyun */
3022*4882a593Smuzhiyun if (seg_cnt > 4) {
3023*4882a593Smuzhiyun req_cnt += (seg_cnt - 4) / 7;
3024*4882a593Smuzhiyun if ((seg_cnt - 4) % 7)
3025*4882a593Smuzhiyun req_cnt++;
3026*4882a593Smuzhiyun }
3027*4882a593Smuzhiyun dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3028*4882a593Smuzhiyun cmd, seg_cnt, req_cnt);
3029*4882a593Smuzhiyun } else if (seg_cnt < 0) {
3030*4882a593Smuzhiyun status = 1;
3031*4882a593Smuzhiyun goto out;
3032*4882a593Smuzhiyun }
3033*4882a593Smuzhiyun
3034*4882a593Smuzhiyun if ((req_cnt + 2) >= ha->req_q_cnt) {
3035*4882a593Smuzhiyun /* Calculate number of free request entries. */
3036*4882a593Smuzhiyun cnt = RD_REG_WORD(®->mailbox4);
3037*4882a593Smuzhiyun if (ha->req_ring_index < cnt)
3038*4882a593Smuzhiyun ha->req_q_cnt = cnt - ha->req_ring_index;
3039*4882a593Smuzhiyun else
3040*4882a593Smuzhiyun ha->req_q_cnt =
3041*4882a593Smuzhiyun REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3042*4882a593Smuzhiyun }
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3045*4882a593Smuzhiyun ha->req_q_cnt, seg_cnt);
3046*4882a593Smuzhiyun /* If room for request in request ring. */
3047*4882a593Smuzhiyun if ((req_cnt + 2) >= ha->req_q_cnt) {
3048*4882a593Smuzhiyun status = SCSI_MLQUEUE_HOST_BUSY;
3049*4882a593Smuzhiyun dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3050*4882a593Smuzhiyun "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3051*4882a593Smuzhiyun ha->req_q_cnt, req_cnt);
3052*4882a593Smuzhiyun goto out;
3053*4882a593Smuzhiyun }
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun /* Check for empty slot in outstanding command list. */
3056*4882a593Smuzhiyun for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3057*4882a593Smuzhiyun (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3058*4882a593Smuzhiyun
3059*4882a593Smuzhiyun if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3060*4882a593Smuzhiyun status = SCSI_MLQUEUE_HOST_BUSY;
3061*4882a593Smuzhiyun dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3062*4882a593Smuzhiyun "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3063*4882a593Smuzhiyun goto out;
3064*4882a593Smuzhiyun }
3065*4882a593Smuzhiyun
3066*4882a593Smuzhiyun CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3067*4882a593Smuzhiyun ha->outstanding_cmds[cnt] = sp;
3068*4882a593Smuzhiyun ha->req_q_cnt -= req_cnt;
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun /*
3071*4882a593Smuzhiyun * Build command packet.
3072*4882a593Smuzhiyun */
3073*4882a593Smuzhiyun pkt = (struct cmd_entry *) ha->request_ring_ptr;
3074*4882a593Smuzhiyun
3075*4882a593Smuzhiyun pkt->entry_type = COMMAND_TYPE;
3076*4882a593Smuzhiyun pkt->entry_count = (uint8_t) req_cnt;
3077*4882a593Smuzhiyun pkt->sys_define = (uint8_t) ha->req_ring_index;
3078*4882a593Smuzhiyun pkt->entry_status = 0;
3079*4882a593Smuzhiyun pkt->handle = cpu_to_le32(cnt);
3080*4882a593Smuzhiyun
3081*4882a593Smuzhiyun /* Zero out remaining portion of packet. */
3082*4882a593Smuzhiyun memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun /* Set ISP command timeout. */
3085*4882a593Smuzhiyun pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3086*4882a593Smuzhiyun
3087*4882a593Smuzhiyun /* Set device target ID and LUN */
3088*4882a593Smuzhiyun pkt->lun = SCSI_LUN_32(cmd);
3089*4882a593Smuzhiyun pkt->target = SCSI_BUS_32(cmd) ?
3090*4882a593Smuzhiyun (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun /* Enable simple tag queuing if device supports it. */
3093*4882a593Smuzhiyun if (cmd->device->simple_tags)
3094*4882a593Smuzhiyun pkt->control_flags |= cpu_to_le16(BIT_3);
3095*4882a593Smuzhiyun
3096*4882a593Smuzhiyun /* Load SCSI command packet. */
3097*4882a593Smuzhiyun pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3098*4882a593Smuzhiyun memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3099*4882a593Smuzhiyun
3100*4882a593Smuzhiyun /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3101*4882a593Smuzhiyun /* Set transfer direction. */
3102*4882a593Smuzhiyun dir = qla1280_data_direction(cmd);
3103*4882a593Smuzhiyun pkt->control_flags |= cpu_to_le16(dir);
3104*4882a593Smuzhiyun
3105*4882a593Smuzhiyun /* Set total data segment count. */
3106*4882a593Smuzhiyun pkt->dseg_count = cpu_to_le16(seg_cnt);
3107*4882a593Smuzhiyun
3108*4882a593Smuzhiyun /*
3109*4882a593Smuzhiyun * Load data segments.
3110*4882a593Smuzhiyun */
3111*4882a593Smuzhiyun if (seg_cnt) {
3112*4882a593Smuzhiyun struct scatterlist *sg, *s;
3113*4882a593Smuzhiyun int remseg = seg_cnt;
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun sg = scsi_sglist(cmd);
3116*4882a593Smuzhiyun
3117*4882a593Smuzhiyun /* Setup packet address segment pointer. */
3118*4882a593Smuzhiyun dword_ptr = &pkt->dseg_0_address;
3119*4882a593Smuzhiyun
3120*4882a593Smuzhiyun dprintk(3, "Building S/G data segments..\n");
3121*4882a593Smuzhiyun qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun /* Load command entry data segments. */
3124*4882a593Smuzhiyun for_each_sg(sg, s, seg_cnt, cnt) {
3125*4882a593Smuzhiyun if (cnt == 4)
3126*4882a593Smuzhiyun break;
3127*4882a593Smuzhiyun *dword_ptr++ =
3128*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3129*4882a593Smuzhiyun *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3130*4882a593Smuzhiyun dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3131*4882a593Smuzhiyun (lower_32_bits(sg_dma_address(s))),
3132*4882a593Smuzhiyun (sg_dma_len(s)));
3133*4882a593Smuzhiyun remseg--;
3134*4882a593Smuzhiyun }
3135*4882a593Smuzhiyun /*
3136*4882a593Smuzhiyun * Build continuation packets.
3137*4882a593Smuzhiyun */
3138*4882a593Smuzhiyun dprintk(3, "S/G Building Continuation"
3139*4882a593Smuzhiyun "...seg_cnt=0x%x remains\n", seg_cnt);
3140*4882a593Smuzhiyun while (remseg > 0) {
3141*4882a593Smuzhiyun /* Continue from end point */
3142*4882a593Smuzhiyun sg = s;
3143*4882a593Smuzhiyun /* Adjust ring index. */
3144*4882a593Smuzhiyun ha->req_ring_index++;
3145*4882a593Smuzhiyun if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3146*4882a593Smuzhiyun ha->req_ring_index = 0;
3147*4882a593Smuzhiyun ha->request_ring_ptr =
3148*4882a593Smuzhiyun ha->request_ring;
3149*4882a593Smuzhiyun } else
3150*4882a593Smuzhiyun ha->request_ring_ptr++;
3151*4882a593Smuzhiyun
3152*4882a593Smuzhiyun pkt = (struct cmd_entry *)ha->request_ring_ptr;
3153*4882a593Smuzhiyun
3154*4882a593Smuzhiyun /* Zero out packet. */
3155*4882a593Smuzhiyun memset(pkt, 0, REQUEST_ENTRY_SIZE);
3156*4882a593Smuzhiyun
3157*4882a593Smuzhiyun /* Load packet defaults. */
3158*4882a593Smuzhiyun ((struct cont_entry *) pkt)->
3159*4882a593Smuzhiyun entry_type = CONTINUE_TYPE;
3160*4882a593Smuzhiyun ((struct cont_entry *) pkt)->entry_count = 1;
3161*4882a593Smuzhiyun
3162*4882a593Smuzhiyun ((struct cont_entry *) pkt)->sys_define =
3163*4882a593Smuzhiyun (uint8_t) ha->req_ring_index;
3164*4882a593Smuzhiyun
3165*4882a593Smuzhiyun /* Setup packet address segment pointer. */
3166*4882a593Smuzhiyun dword_ptr =
3167*4882a593Smuzhiyun &((struct cont_entry *) pkt)->dseg_0_address;
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun /* Load continuation entry data segments. */
3170*4882a593Smuzhiyun for_each_sg(sg, s, remseg, cnt) {
3171*4882a593Smuzhiyun if (cnt == 7)
3172*4882a593Smuzhiyun break;
3173*4882a593Smuzhiyun *dword_ptr++ =
3174*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3175*4882a593Smuzhiyun *dword_ptr++ =
3176*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(s));
3177*4882a593Smuzhiyun dprintk(1,
3178*4882a593Smuzhiyun "S/G Segment Cont. phys_addr=0x%x, "
3179*4882a593Smuzhiyun "len=0x%x\n",
3180*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(sg_dma_address(s))),
3181*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(s)));
3182*4882a593Smuzhiyun }
3183*4882a593Smuzhiyun remseg -= cnt;
3184*4882a593Smuzhiyun dprintk(5, "qla1280_32bit_start_scsi: "
3185*4882a593Smuzhiyun "continuation packet data - "
3186*4882a593Smuzhiyun "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3187*4882a593Smuzhiyun SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3188*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt,
3189*4882a593Smuzhiyun REQUEST_ENTRY_SIZE);
3190*4882a593Smuzhiyun }
3191*4882a593Smuzhiyun } else { /* No data transfer at all */
3192*4882a593Smuzhiyun dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3193*4882a593Smuzhiyun "packet data - \n");
3194*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3195*4882a593Smuzhiyun }
3196*4882a593Smuzhiyun dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3197*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3198*4882a593Smuzhiyun REQUEST_ENTRY_SIZE);
3199*4882a593Smuzhiyun
3200*4882a593Smuzhiyun /* Adjust ring index. */
3201*4882a593Smuzhiyun ha->req_ring_index++;
3202*4882a593Smuzhiyun if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3203*4882a593Smuzhiyun ha->req_ring_index = 0;
3204*4882a593Smuzhiyun ha->request_ring_ptr = ha->request_ring;
3205*4882a593Smuzhiyun } else
3206*4882a593Smuzhiyun ha->request_ring_ptr++;
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun /* Set chip new ring index. */
3209*4882a593Smuzhiyun dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3210*4882a593Smuzhiyun "for pending command\n");
3211*4882a593Smuzhiyun sp->flags |= SRB_SENT;
3212*4882a593Smuzhiyun ha->actthreads++;
3213*4882a593Smuzhiyun WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
3214*4882a593Smuzhiyun
3215*4882a593Smuzhiyun out:
3216*4882a593Smuzhiyun if (status)
3217*4882a593Smuzhiyun dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3218*4882a593Smuzhiyun
3219*4882a593Smuzhiyun LEAVE("qla1280_32bit_start_scsi");
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun return status;
3222*4882a593Smuzhiyun }
3223*4882a593Smuzhiyun #endif
3224*4882a593Smuzhiyun
3225*4882a593Smuzhiyun /*
3226*4882a593Smuzhiyun * qla1280_req_pkt
3227*4882a593Smuzhiyun * Function is responsible for locking ring and
3228*4882a593Smuzhiyun * getting a zeroed out request packet.
3229*4882a593Smuzhiyun *
3230*4882a593Smuzhiyun * Input:
3231*4882a593Smuzhiyun * ha = adapter block pointer.
3232*4882a593Smuzhiyun *
3233*4882a593Smuzhiyun * Returns:
3234*4882a593Smuzhiyun * 0 = failed to get slot.
3235*4882a593Smuzhiyun */
3236*4882a593Smuzhiyun static request_t *
qla1280_req_pkt(struct scsi_qla_host * ha)3237*4882a593Smuzhiyun qla1280_req_pkt(struct scsi_qla_host *ha)
3238*4882a593Smuzhiyun {
3239*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3240*4882a593Smuzhiyun request_t *pkt = NULL;
3241*4882a593Smuzhiyun int cnt;
3242*4882a593Smuzhiyun uint32_t timer;
3243*4882a593Smuzhiyun
3244*4882a593Smuzhiyun ENTER("qla1280_req_pkt");
3245*4882a593Smuzhiyun
3246*4882a593Smuzhiyun /*
3247*4882a593Smuzhiyun * This can be called from interrupt context, damn it!!!
3248*4882a593Smuzhiyun */
3249*4882a593Smuzhiyun /* Wait for 30 seconds for slot. */
3250*4882a593Smuzhiyun for (timer = 15000000; timer; timer--) {
3251*4882a593Smuzhiyun if (ha->req_q_cnt > 0) {
3252*4882a593Smuzhiyun /* Calculate number of free request entries. */
3253*4882a593Smuzhiyun cnt = RD_REG_WORD(®->mailbox4);
3254*4882a593Smuzhiyun if (ha->req_ring_index < cnt)
3255*4882a593Smuzhiyun ha->req_q_cnt = cnt - ha->req_ring_index;
3256*4882a593Smuzhiyun else
3257*4882a593Smuzhiyun ha->req_q_cnt =
3258*4882a593Smuzhiyun REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3259*4882a593Smuzhiyun }
3260*4882a593Smuzhiyun
3261*4882a593Smuzhiyun /* Found empty request ring slot? */
3262*4882a593Smuzhiyun if (ha->req_q_cnt > 0) {
3263*4882a593Smuzhiyun ha->req_q_cnt--;
3264*4882a593Smuzhiyun pkt = ha->request_ring_ptr;
3265*4882a593Smuzhiyun
3266*4882a593Smuzhiyun /* Zero out packet. */
3267*4882a593Smuzhiyun memset(pkt, 0, REQUEST_ENTRY_SIZE);
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun /*
3270*4882a593Smuzhiyun * How can this be right when we have a ring
3271*4882a593Smuzhiyun * size of 512???
3272*4882a593Smuzhiyun */
3273*4882a593Smuzhiyun /* Set system defined field. */
3274*4882a593Smuzhiyun pkt->sys_define = (uint8_t) ha->req_ring_index;
3275*4882a593Smuzhiyun
3276*4882a593Smuzhiyun /* Set entry count. */
3277*4882a593Smuzhiyun pkt->entry_count = 1;
3278*4882a593Smuzhiyun
3279*4882a593Smuzhiyun break;
3280*4882a593Smuzhiyun }
3281*4882a593Smuzhiyun
3282*4882a593Smuzhiyun udelay(2); /* 10 */
3283*4882a593Smuzhiyun
3284*4882a593Smuzhiyun /* Check for pending interrupts. */
3285*4882a593Smuzhiyun qla1280_poll(ha);
3286*4882a593Smuzhiyun }
3287*4882a593Smuzhiyun
3288*4882a593Smuzhiyun if (!pkt)
3289*4882a593Smuzhiyun dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3290*4882a593Smuzhiyun else
3291*4882a593Smuzhiyun dprintk(3, "qla1280_req_pkt: exiting normally\n");
3292*4882a593Smuzhiyun
3293*4882a593Smuzhiyun return pkt;
3294*4882a593Smuzhiyun }
3295*4882a593Smuzhiyun
3296*4882a593Smuzhiyun /*
3297*4882a593Smuzhiyun * qla1280_isp_cmd
3298*4882a593Smuzhiyun * Function is responsible for modifying ISP input pointer.
3299*4882a593Smuzhiyun * Releases ring lock.
3300*4882a593Smuzhiyun *
3301*4882a593Smuzhiyun * Input:
3302*4882a593Smuzhiyun * ha = adapter block pointer.
3303*4882a593Smuzhiyun */
3304*4882a593Smuzhiyun static void
qla1280_isp_cmd(struct scsi_qla_host * ha)3305*4882a593Smuzhiyun qla1280_isp_cmd(struct scsi_qla_host *ha)
3306*4882a593Smuzhiyun {
3307*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3308*4882a593Smuzhiyun
3309*4882a593Smuzhiyun ENTER("qla1280_isp_cmd");
3310*4882a593Smuzhiyun
3311*4882a593Smuzhiyun dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3312*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3313*4882a593Smuzhiyun REQUEST_ENTRY_SIZE);
3314*4882a593Smuzhiyun
3315*4882a593Smuzhiyun /* Adjust ring index. */
3316*4882a593Smuzhiyun ha->req_ring_index++;
3317*4882a593Smuzhiyun if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3318*4882a593Smuzhiyun ha->req_ring_index = 0;
3319*4882a593Smuzhiyun ha->request_ring_ptr = ha->request_ring;
3320*4882a593Smuzhiyun } else
3321*4882a593Smuzhiyun ha->request_ring_ptr++;
3322*4882a593Smuzhiyun
3323*4882a593Smuzhiyun /*
3324*4882a593Smuzhiyun * Update request index to mailbox4 (Request Queue In).
3325*4882a593Smuzhiyun */
3326*4882a593Smuzhiyun WRT_REG_WORD(®->mailbox4, ha->req_ring_index);
3327*4882a593Smuzhiyun
3328*4882a593Smuzhiyun LEAVE("qla1280_isp_cmd");
3329*4882a593Smuzhiyun }
3330*4882a593Smuzhiyun
3331*4882a593Smuzhiyun /****************************************************************************/
3332*4882a593Smuzhiyun /* Interrupt Service Routine. */
3333*4882a593Smuzhiyun /****************************************************************************/
3334*4882a593Smuzhiyun
3335*4882a593Smuzhiyun /****************************************************************************
3336*4882a593Smuzhiyun * qla1280_isr
3337*4882a593Smuzhiyun * Calls I/O done on command completion.
3338*4882a593Smuzhiyun *
3339*4882a593Smuzhiyun * Input:
3340*4882a593Smuzhiyun * ha = adapter block pointer.
3341*4882a593Smuzhiyun * done_q = done queue.
3342*4882a593Smuzhiyun ****************************************************************************/
3343*4882a593Smuzhiyun static void
qla1280_isr(struct scsi_qla_host * ha,struct list_head * done_q)3344*4882a593Smuzhiyun qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3345*4882a593Smuzhiyun {
3346*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3347*4882a593Smuzhiyun struct response *pkt;
3348*4882a593Smuzhiyun struct srb *sp = NULL;
3349*4882a593Smuzhiyun uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3350*4882a593Smuzhiyun uint16_t *wptr;
3351*4882a593Smuzhiyun uint32_t index;
3352*4882a593Smuzhiyun u16 istatus;
3353*4882a593Smuzhiyun
3354*4882a593Smuzhiyun ENTER("qla1280_isr");
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun istatus = RD_REG_WORD(®->istatus);
3357*4882a593Smuzhiyun if (!(istatus & (RISC_INT | PCI_INT)))
3358*4882a593Smuzhiyun return;
3359*4882a593Smuzhiyun
3360*4882a593Smuzhiyun /* Save mailbox register 5 */
3361*4882a593Smuzhiyun mailbox[5] = RD_REG_WORD(®->mailbox5);
3362*4882a593Smuzhiyun
3363*4882a593Smuzhiyun /* Check for mailbox interrupt. */
3364*4882a593Smuzhiyun
3365*4882a593Smuzhiyun mailbox[0] = RD_REG_WORD_dmasync(®->semaphore);
3366*4882a593Smuzhiyun
3367*4882a593Smuzhiyun if (mailbox[0] & BIT_0) {
3368*4882a593Smuzhiyun /* Get mailbox data. */
3369*4882a593Smuzhiyun /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3370*4882a593Smuzhiyun
3371*4882a593Smuzhiyun wptr = &mailbox[0];
3372*4882a593Smuzhiyun *wptr++ = RD_REG_WORD(®->mailbox0);
3373*4882a593Smuzhiyun *wptr++ = RD_REG_WORD(®->mailbox1);
3374*4882a593Smuzhiyun *wptr = RD_REG_WORD(®->mailbox2);
3375*4882a593Smuzhiyun if (mailbox[0] != MBA_SCSI_COMPLETION) {
3376*4882a593Smuzhiyun wptr++;
3377*4882a593Smuzhiyun *wptr++ = RD_REG_WORD(®->mailbox3);
3378*4882a593Smuzhiyun *wptr++ = RD_REG_WORD(®->mailbox4);
3379*4882a593Smuzhiyun wptr++;
3380*4882a593Smuzhiyun *wptr++ = RD_REG_WORD(®->mailbox6);
3381*4882a593Smuzhiyun *wptr = RD_REG_WORD(®->mailbox7);
3382*4882a593Smuzhiyun }
3383*4882a593Smuzhiyun
3384*4882a593Smuzhiyun /* Release mailbox registers. */
3385*4882a593Smuzhiyun
3386*4882a593Smuzhiyun WRT_REG_WORD(®->semaphore, 0);
3387*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT);
3388*4882a593Smuzhiyun
3389*4882a593Smuzhiyun dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3390*4882a593Smuzhiyun mailbox[0]);
3391*4882a593Smuzhiyun
3392*4882a593Smuzhiyun /* Handle asynchronous event */
3393*4882a593Smuzhiyun switch (mailbox[0]) {
3394*4882a593Smuzhiyun case MBA_SCSI_COMPLETION: /* Response completion */
3395*4882a593Smuzhiyun dprintk(5, "qla1280_isr: mailbox SCSI response "
3396*4882a593Smuzhiyun "completion\n");
3397*4882a593Smuzhiyun
3398*4882a593Smuzhiyun if (ha->flags.online) {
3399*4882a593Smuzhiyun /* Get outstanding command index. */
3400*4882a593Smuzhiyun index = mailbox[2] << 16 | mailbox[1];
3401*4882a593Smuzhiyun
3402*4882a593Smuzhiyun /* Validate handle. */
3403*4882a593Smuzhiyun if (index < MAX_OUTSTANDING_COMMANDS)
3404*4882a593Smuzhiyun sp = ha->outstanding_cmds[index];
3405*4882a593Smuzhiyun else
3406*4882a593Smuzhiyun sp = NULL;
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun if (sp) {
3409*4882a593Smuzhiyun /* Free outstanding command slot. */
3410*4882a593Smuzhiyun ha->outstanding_cmds[index] = NULL;
3411*4882a593Smuzhiyun
3412*4882a593Smuzhiyun /* Save ISP completion status */
3413*4882a593Smuzhiyun CMD_RESULT(sp->cmd) = 0;
3414*4882a593Smuzhiyun CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun /* Place block on done queue */
3417*4882a593Smuzhiyun list_add_tail(&sp->list, done_q);
3418*4882a593Smuzhiyun } else {
3419*4882a593Smuzhiyun /*
3420*4882a593Smuzhiyun * If we get here we have a real problem!
3421*4882a593Smuzhiyun */
3422*4882a593Smuzhiyun printk(KERN_WARNING
3423*4882a593Smuzhiyun "qla1280: ISP invalid handle\n");
3424*4882a593Smuzhiyun }
3425*4882a593Smuzhiyun }
3426*4882a593Smuzhiyun break;
3427*4882a593Smuzhiyun
3428*4882a593Smuzhiyun case MBA_BUS_RESET: /* SCSI Bus Reset */
3429*4882a593Smuzhiyun ha->flags.reset_marker = 1;
3430*4882a593Smuzhiyun index = mailbox[6] & BIT_0;
3431*4882a593Smuzhiyun ha->bus_settings[index].reset_marker = 1;
3432*4882a593Smuzhiyun
3433*4882a593Smuzhiyun printk(KERN_DEBUG "qla1280_isr(): index %i "
3434*4882a593Smuzhiyun "asynchronous BUS_RESET\n", index);
3435*4882a593Smuzhiyun break;
3436*4882a593Smuzhiyun
3437*4882a593Smuzhiyun case MBA_SYSTEM_ERR: /* System Error */
3438*4882a593Smuzhiyun printk(KERN_WARNING
3439*4882a593Smuzhiyun "qla1280: ISP System Error - mbx1=%xh, mbx2="
3440*4882a593Smuzhiyun "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3441*4882a593Smuzhiyun mailbox[3]);
3442*4882a593Smuzhiyun break;
3443*4882a593Smuzhiyun
3444*4882a593Smuzhiyun case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3445*4882a593Smuzhiyun printk(KERN_WARNING
3446*4882a593Smuzhiyun "qla1280: ISP Request Transfer Error\n");
3447*4882a593Smuzhiyun break;
3448*4882a593Smuzhiyun
3449*4882a593Smuzhiyun case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3450*4882a593Smuzhiyun printk(KERN_WARNING
3451*4882a593Smuzhiyun "qla1280: ISP Response Transfer Error\n");
3452*4882a593Smuzhiyun break;
3453*4882a593Smuzhiyun
3454*4882a593Smuzhiyun case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3455*4882a593Smuzhiyun dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3456*4882a593Smuzhiyun break;
3457*4882a593Smuzhiyun
3458*4882a593Smuzhiyun case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3459*4882a593Smuzhiyun dprintk(2,
3460*4882a593Smuzhiyun "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3461*4882a593Smuzhiyun break;
3462*4882a593Smuzhiyun
3463*4882a593Smuzhiyun case MBA_DEVICE_RESET: /* Bus Device Reset */
3464*4882a593Smuzhiyun printk(KERN_INFO "qla1280_isr(): asynchronous "
3465*4882a593Smuzhiyun "BUS_DEVICE_RESET\n");
3466*4882a593Smuzhiyun
3467*4882a593Smuzhiyun ha->flags.reset_marker = 1;
3468*4882a593Smuzhiyun index = mailbox[6] & BIT_0;
3469*4882a593Smuzhiyun ha->bus_settings[index].reset_marker = 1;
3470*4882a593Smuzhiyun break;
3471*4882a593Smuzhiyun
3472*4882a593Smuzhiyun case MBA_BUS_MODE_CHANGE:
3473*4882a593Smuzhiyun dprintk(2,
3474*4882a593Smuzhiyun "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3475*4882a593Smuzhiyun break;
3476*4882a593Smuzhiyun
3477*4882a593Smuzhiyun default:
3478*4882a593Smuzhiyun /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3479*4882a593Smuzhiyun if (mailbox[0] < MBA_ASYNC_EVENT) {
3480*4882a593Smuzhiyun wptr = &mailbox[0];
3481*4882a593Smuzhiyun memcpy((uint16_t *) ha->mailbox_out, wptr,
3482*4882a593Smuzhiyun MAILBOX_REGISTER_COUNT *
3483*4882a593Smuzhiyun sizeof(uint16_t));
3484*4882a593Smuzhiyun
3485*4882a593Smuzhiyun if(ha->mailbox_wait != NULL)
3486*4882a593Smuzhiyun complete(ha->mailbox_wait);
3487*4882a593Smuzhiyun }
3488*4882a593Smuzhiyun break;
3489*4882a593Smuzhiyun }
3490*4882a593Smuzhiyun } else {
3491*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT);
3492*4882a593Smuzhiyun }
3493*4882a593Smuzhiyun
3494*4882a593Smuzhiyun /*
3495*4882a593Smuzhiyun * We will receive interrupts during mailbox testing prior to
3496*4882a593Smuzhiyun * the card being marked online, hence the double check.
3497*4882a593Smuzhiyun */
3498*4882a593Smuzhiyun if (!(ha->flags.online && !ha->mailbox_wait)) {
3499*4882a593Smuzhiyun dprintk(2, "qla1280_isr: Response pointer Error\n");
3500*4882a593Smuzhiyun goto out;
3501*4882a593Smuzhiyun }
3502*4882a593Smuzhiyun
3503*4882a593Smuzhiyun if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3504*4882a593Smuzhiyun goto out;
3505*4882a593Smuzhiyun
3506*4882a593Smuzhiyun while (ha->rsp_ring_index != mailbox[5]) {
3507*4882a593Smuzhiyun pkt = ha->response_ring_ptr;
3508*4882a593Smuzhiyun
3509*4882a593Smuzhiyun dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3510*4882a593Smuzhiyun " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3511*4882a593Smuzhiyun dprintk(5,"qla1280_isr: response packet data\n");
3512*4882a593Smuzhiyun qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3513*4882a593Smuzhiyun
3514*4882a593Smuzhiyun if (pkt->entry_type == STATUS_TYPE) {
3515*4882a593Smuzhiyun if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3516*4882a593Smuzhiyun || pkt->comp_status || pkt->entry_status) {
3517*4882a593Smuzhiyun dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3518*4882a593Smuzhiyun "0x%x mailbox[5] = 0x%x, comp_status "
3519*4882a593Smuzhiyun "= 0x%x, scsi_status = 0x%x\n",
3520*4882a593Smuzhiyun ha->rsp_ring_index, mailbox[5],
3521*4882a593Smuzhiyun le16_to_cpu(pkt->comp_status),
3522*4882a593Smuzhiyun le16_to_cpu(pkt->scsi_status));
3523*4882a593Smuzhiyun }
3524*4882a593Smuzhiyun } else {
3525*4882a593Smuzhiyun dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3526*4882a593Smuzhiyun "0x%x, mailbox[5] = 0x%x\n",
3527*4882a593Smuzhiyun ha->rsp_ring_index, mailbox[5]);
3528*4882a593Smuzhiyun dprintk(2, "qla1280_isr: response packet data\n");
3529*4882a593Smuzhiyun qla1280_dump_buffer(2, (char *)pkt,
3530*4882a593Smuzhiyun RESPONSE_ENTRY_SIZE);
3531*4882a593Smuzhiyun }
3532*4882a593Smuzhiyun
3533*4882a593Smuzhiyun if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3534*4882a593Smuzhiyun dprintk(2, "status: Cmd %p, handle %i\n",
3535*4882a593Smuzhiyun ha->outstanding_cmds[pkt->handle]->cmd,
3536*4882a593Smuzhiyun pkt->handle);
3537*4882a593Smuzhiyun if (pkt->entry_type == STATUS_TYPE)
3538*4882a593Smuzhiyun qla1280_status_entry(ha, pkt, done_q);
3539*4882a593Smuzhiyun else
3540*4882a593Smuzhiyun qla1280_error_entry(ha, pkt, done_q);
3541*4882a593Smuzhiyun /* Adjust ring index. */
3542*4882a593Smuzhiyun ha->rsp_ring_index++;
3543*4882a593Smuzhiyun if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3544*4882a593Smuzhiyun ha->rsp_ring_index = 0;
3545*4882a593Smuzhiyun ha->response_ring_ptr = ha->response_ring;
3546*4882a593Smuzhiyun } else
3547*4882a593Smuzhiyun ha->response_ring_ptr++;
3548*4882a593Smuzhiyun WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index);
3549*4882a593Smuzhiyun }
3550*4882a593Smuzhiyun }
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun out:
3553*4882a593Smuzhiyun LEAVE("qla1280_isr");
3554*4882a593Smuzhiyun }
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun /*
3557*4882a593Smuzhiyun * qla1280_rst_aen
3558*4882a593Smuzhiyun * Processes asynchronous reset.
3559*4882a593Smuzhiyun *
3560*4882a593Smuzhiyun * Input:
3561*4882a593Smuzhiyun * ha = adapter block pointer.
3562*4882a593Smuzhiyun */
3563*4882a593Smuzhiyun static void
qla1280_rst_aen(struct scsi_qla_host * ha)3564*4882a593Smuzhiyun qla1280_rst_aen(struct scsi_qla_host *ha)
3565*4882a593Smuzhiyun {
3566*4882a593Smuzhiyun uint8_t bus;
3567*4882a593Smuzhiyun
3568*4882a593Smuzhiyun ENTER("qla1280_rst_aen");
3569*4882a593Smuzhiyun
3570*4882a593Smuzhiyun if (ha->flags.online && !ha->flags.reset_active &&
3571*4882a593Smuzhiyun !ha->flags.abort_isp_active) {
3572*4882a593Smuzhiyun ha->flags.reset_active = 1;
3573*4882a593Smuzhiyun while (ha->flags.reset_marker) {
3574*4882a593Smuzhiyun /* Issue marker command. */
3575*4882a593Smuzhiyun ha->flags.reset_marker = 0;
3576*4882a593Smuzhiyun for (bus = 0; bus < ha->ports &&
3577*4882a593Smuzhiyun !ha->flags.reset_marker; bus++) {
3578*4882a593Smuzhiyun if (ha->bus_settings[bus].reset_marker) {
3579*4882a593Smuzhiyun ha->bus_settings[bus].reset_marker = 0;
3580*4882a593Smuzhiyun qla1280_marker(ha, bus, 0, 0,
3581*4882a593Smuzhiyun MK_SYNC_ALL);
3582*4882a593Smuzhiyun }
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun }
3585*4882a593Smuzhiyun }
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun LEAVE("qla1280_rst_aen");
3588*4882a593Smuzhiyun }
3589*4882a593Smuzhiyun
3590*4882a593Smuzhiyun
3591*4882a593Smuzhiyun /*
3592*4882a593Smuzhiyun * qla1280_status_entry
3593*4882a593Smuzhiyun * Processes received ISP status entry.
3594*4882a593Smuzhiyun *
3595*4882a593Smuzhiyun * Input:
3596*4882a593Smuzhiyun * ha = adapter block pointer.
3597*4882a593Smuzhiyun * pkt = entry pointer.
3598*4882a593Smuzhiyun * done_q = done queue.
3599*4882a593Smuzhiyun */
3600*4882a593Smuzhiyun static void
qla1280_status_entry(struct scsi_qla_host * ha,struct response * pkt,struct list_head * done_q)3601*4882a593Smuzhiyun qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3602*4882a593Smuzhiyun struct list_head *done_q)
3603*4882a593Smuzhiyun {
3604*4882a593Smuzhiyun int sense_sz;
3605*4882a593Smuzhiyun struct srb *sp;
3606*4882a593Smuzhiyun struct scsi_cmnd *cmd;
3607*4882a593Smuzhiyun uint32_t handle = le32_to_cpu(pkt->handle);
3608*4882a593Smuzhiyun uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3609*4882a593Smuzhiyun uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3610*4882a593Smuzhiyun
3611*4882a593Smuzhiyun ENTER("qla1280_status_entry");
3612*4882a593Smuzhiyun
3613*4882a593Smuzhiyun /* Validate handle. */
3614*4882a593Smuzhiyun if (handle < MAX_OUTSTANDING_COMMANDS)
3615*4882a593Smuzhiyun sp = ha->outstanding_cmds[handle];
3616*4882a593Smuzhiyun else
3617*4882a593Smuzhiyun sp = NULL;
3618*4882a593Smuzhiyun
3619*4882a593Smuzhiyun if (!sp) {
3620*4882a593Smuzhiyun printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3621*4882a593Smuzhiyun goto out;
3622*4882a593Smuzhiyun }
3623*4882a593Smuzhiyun
3624*4882a593Smuzhiyun /* Free outstanding command slot. */
3625*4882a593Smuzhiyun ha->outstanding_cmds[handle] = NULL;
3626*4882a593Smuzhiyun
3627*4882a593Smuzhiyun cmd = sp->cmd;
3628*4882a593Smuzhiyun
3629*4882a593Smuzhiyun if (comp_status || scsi_status) {
3630*4882a593Smuzhiyun dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3631*4882a593Smuzhiyun "0x%x, handle = 0x%x\n", comp_status,
3632*4882a593Smuzhiyun scsi_status, handle);
3633*4882a593Smuzhiyun }
3634*4882a593Smuzhiyun
3635*4882a593Smuzhiyun /* Target busy or queue full */
3636*4882a593Smuzhiyun if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3637*4882a593Smuzhiyun (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3638*4882a593Smuzhiyun CMD_RESULT(cmd) = scsi_status & 0xff;
3639*4882a593Smuzhiyun } else {
3640*4882a593Smuzhiyun
3641*4882a593Smuzhiyun /* Save ISP completion status */
3642*4882a593Smuzhiyun CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3643*4882a593Smuzhiyun
3644*4882a593Smuzhiyun if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3645*4882a593Smuzhiyun if (comp_status != CS_ARS_FAILED) {
3646*4882a593Smuzhiyun uint16_t req_sense_length =
3647*4882a593Smuzhiyun le16_to_cpu(pkt->req_sense_length);
3648*4882a593Smuzhiyun if (req_sense_length < CMD_SNSLEN(cmd))
3649*4882a593Smuzhiyun sense_sz = req_sense_length;
3650*4882a593Smuzhiyun else
3651*4882a593Smuzhiyun /*
3652*4882a593Smuzhiyun * scsi_cmnd->sense_buffer is
3653*4882a593Smuzhiyun * 64 bytes, why only copy 63?
3654*4882a593Smuzhiyun * This looks wrong! /Jes
3655*4882a593Smuzhiyun */
3656*4882a593Smuzhiyun sense_sz = CMD_SNSLEN(cmd) - 1;
3657*4882a593Smuzhiyun
3658*4882a593Smuzhiyun memcpy(cmd->sense_buffer,
3659*4882a593Smuzhiyun &pkt->req_sense_data, sense_sz);
3660*4882a593Smuzhiyun } else
3661*4882a593Smuzhiyun sense_sz = 0;
3662*4882a593Smuzhiyun memset(cmd->sense_buffer + sense_sz, 0,
3663*4882a593Smuzhiyun SCSI_SENSE_BUFFERSIZE - sense_sz);
3664*4882a593Smuzhiyun
3665*4882a593Smuzhiyun dprintk(2, "qla1280_status_entry: Check "
3666*4882a593Smuzhiyun "condition Sense data, b %i, t %i, "
3667*4882a593Smuzhiyun "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
3668*4882a593Smuzhiyun SCSI_LUN_32(cmd));
3669*4882a593Smuzhiyun if (sense_sz)
3670*4882a593Smuzhiyun qla1280_dump_buffer(2,
3671*4882a593Smuzhiyun (char *)cmd->sense_buffer,
3672*4882a593Smuzhiyun sense_sz);
3673*4882a593Smuzhiyun }
3674*4882a593Smuzhiyun }
3675*4882a593Smuzhiyun
3676*4882a593Smuzhiyun CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3677*4882a593Smuzhiyun
3678*4882a593Smuzhiyun /* Place command on done queue. */
3679*4882a593Smuzhiyun list_add_tail(&sp->list, done_q);
3680*4882a593Smuzhiyun out:
3681*4882a593Smuzhiyun LEAVE("qla1280_status_entry");
3682*4882a593Smuzhiyun }
3683*4882a593Smuzhiyun
3684*4882a593Smuzhiyun /*
3685*4882a593Smuzhiyun * qla1280_error_entry
3686*4882a593Smuzhiyun * Processes error entry.
3687*4882a593Smuzhiyun *
3688*4882a593Smuzhiyun * Input:
3689*4882a593Smuzhiyun * ha = adapter block pointer.
3690*4882a593Smuzhiyun * pkt = entry pointer.
3691*4882a593Smuzhiyun * done_q = done queue.
3692*4882a593Smuzhiyun */
3693*4882a593Smuzhiyun static void
qla1280_error_entry(struct scsi_qla_host * ha,struct response * pkt,struct list_head * done_q)3694*4882a593Smuzhiyun qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3695*4882a593Smuzhiyun struct list_head *done_q)
3696*4882a593Smuzhiyun {
3697*4882a593Smuzhiyun struct srb *sp;
3698*4882a593Smuzhiyun uint32_t handle = le32_to_cpu(pkt->handle);
3699*4882a593Smuzhiyun
3700*4882a593Smuzhiyun ENTER("qla1280_error_entry");
3701*4882a593Smuzhiyun
3702*4882a593Smuzhiyun if (pkt->entry_status & BIT_3)
3703*4882a593Smuzhiyun dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3704*4882a593Smuzhiyun else if (pkt->entry_status & BIT_2)
3705*4882a593Smuzhiyun dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3706*4882a593Smuzhiyun else if (pkt->entry_status & BIT_1)
3707*4882a593Smuzhiyun dprintk(2, "qla1280_error_entry: FULL flag error\n");
3708*4882a593Smuzhiyun else
3709*4882a593Smuzhiyun dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun /* Validate handle. */
3712*4882a593Smuzhiyun if (handle < MAX_OUTSTANDING_COMMANDS)
3713*4882a593Smuzhiyun sp = ha->outstanding_cmds[handle];
3714*4882a593Smuzhiyun else
3715*4882a593Smuzhiyun sp = NULL;
3716*4882a593Smuzhiyun
3717*4882a593Smuzhiyun if (sp) {
3718*4882a593Smuzhiyun /* Free outstanding command slot. */
3719*4882a593Smuzhiyun ha->outstanding_cmds[handle] = NULL;
3720*4882a593Smuzhiyun
3721*4882a593Smuzhiyun /* Bad payload or header */
3722*4882a593Smuzhiyun if (pkt->entry_status & (BIT_3 + BIT_2)) {
3723*4882a593Smuzhiyun /* Bad payload or header, set error status. */
3724*4882a593Smuzhiyun /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3725*4882a593Smuzhiyun CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3726*4882a593Smuzhiyun } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3727*4882a593Smuzhiyun CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3728*4882a593Smuzhiyun } else {
3729*4882a593Smuzhiyun /* Set error status. */
3730*4882a593Smuzhiyun CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3731*4882a593Smuzhiyun }
3732*4882a593Smuzhiyun
3733*4882a593Smuzhiyun CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun /* Place command on done queue. */
3736*4882a593Smuzhiyun list_add_tail(&sp->list, done_q);
3737*4882a593Smuzhiyun }
3738*4882a593Smuzhiyun #ifdef QLA_64BIT_PTR
3739*4882a593Smuzhiyun else if (pkt->entry_type == COMMAND_A64_TYPE) {
3740*4882a593Smuzhiyun printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3741*4882a593Smuzhiyun }
3742*4882a593Smuzhiyun #endif
3743*4882a593Smuzhiyun
3744*4882a593Smuzhiyun LEAVE("qla1280_error_entry");
3745*4882a593Smuzhiyun }
3746*4882a593Smuzhiyun
3747*4882a593Smuzhiyun /*
3748*4882a593Smuzhiyun * qla1280_abort_isp
3749*4882a593Smuzhiyun * Resets ISP and aborts all outstanding commands.
3750*4882a593Smuzhiyun *
3751*4882a593Smuzhiyun * Input:
3752*4882a593Smuzhiyun * ha = adapter block pointer.
3753*4882a593Smuzhiyun *
3754*4882a593Smuzhiyun * Returns:
3755*4882a593Smuzhiyun * 0 = success
3756*4882a593Smuzhiyun */
3757*4882a593Smuzhiyun static int
qla1280_abort_isp(struct scsi_qla_host * ha)3758*4882a593Smuzhiyun qla1280_abort_isp(struct scsi_qla_host *ha)
3759*4882a593Smuzhiyun {
3760*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3761*4882a593Smuzhiyun struct srb *sp;
3762*4882a593Smuzhiyun int status = 0;
3763*4882a593Smuzhiyun int cnt;
3764*4882a593Smuzhiyun int bus;
3765*4882a593Smuzhiyun
3766*4882a593Smuzhiyun ENTER("qla1280_abort_isp");
3767*4882a593Smuzhiyun
3768*4882a593Smuzhiyun if (ha->flags.abort_isp_active || !ha->flags.online)
3769*4882a593Smuzhiyun goto out;
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun ha->flags.abort_isp_active = 1;
3772*4882a593Smuzhiyun
3773*4882a593Smuzhiyun /* Disable ISP interrupts. */
3774*4882a593Smuzhiyun qla1280_disable_intrs(ha);
3775*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC);
3776*4882a593Smuzhiyun RD_REG_WORD(®->id_l);
3777*4882a593Smuzhiyun
3778*4882a593Smuzhiyun printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3779*4882a593Smuzhiyun ha->host_no);
3780*4882a593Smuzhiyun /* Dequeue all commands in outstanding command list. */
3781*4882a593Smuzhiyun for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3782*4882a593Smuzhiyun struct scsi_cmnd *cmd;
3783*4882a593Smuzhiyun sp = ha->outstanding_cmds[cnt];
3784*4882a593Smuzhiyun if (sp) {
3785*4882a593Smuzhiyun cmd = sp->cmd;
3786*4882a593Smuzhiyun CMD_RESULT(cmd) = DID_RESET << 16;
3787*4882a593Smuzhiyun CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3788*4882a593Smuzhiyun ha->outstanding_cmds[cnt] = NULL;
3789*4882a593Smuzhiyun list_add_tail(&sp->list, &ha->done_q);
3790*4882a593Smuzhiyun }
3791*4882a593Smuzhiyun }
3792*4882a593Smuzhiyun
3793*4882a593Smuzhiyun qla1280_done(ha);
3794*4882a593Smuzhiyun
3795*4882a593Smuzhiyun status = qla1280_load_firmware(ha);
3796*4882a593Smuzhiyun if (status)
3797*4882a593Smuzhiyun goto out;
3798*4882a593Smuzhiyun
3799*4882a593Smuzhiyun /* Setup adapter based on NVRAM parameters. */
3800*4882a593Smuzhiyun qla1280_nvram_config (ha);
3801*4882a593Smuzhiyun
3802*4882a593Smuzhiyun status = qla1280_init_rings(ha);
3803*4882a593Smuzhiyun if (status)
3804*4882a593Smuzhiyun goto out;
3805*4882a593Smuzhiyun
3806*4882a593Smuzhiyun /* Issue SCSI reset. */
3807*4882a593Smuzhiyun for (bus = 0; bus < ha->ports; bus++)
3808*4882a593Smuzhiyun qla1280_bus_reset(ha, bus);
3809*4882a593Smuzhiyun
3810*4882a593Smuzhiyun ha->flags.abort_isp_active = 0;
3811*4882a593Smuzhiyun out:
3812*4882a593Smuzhiyun if (status) {
3813*4882a593Smuzhiyun printk(KERN_WARNING
3814*4882a593Smuzhiyun "qla1280: ISP error recovery failed, board disabled");
3815*4882a593Smuzhiyun qla1280_reset_adapter(ha);
3816*4882a593Smuzhiyun dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3817*4882a593Smuzhiyun }
3818*4882a593Smuzhiyun
3819*4882a593Smuzhiyun LEAVE("qla1280_abort_isp");
3820*4882a593Smuzhiyun return status;
3821*4882a593Smuzhiyun }
3822*4882a593Smuzhiyun
3823*4882a593Smuzhiyun
3824*4882a593Smuzhiyun /*
3825*4882a593Smuzhiyun * qla1280_debounce_register
3826*4882a593Smuzhiyun * Debounce register.
3827*4882a593Smuzhiyun *
3828*4882a593Smuzhiyun * Input:
3829*4882a593Smuzhiyun * port = register address.
3830*4882a593Smuzhiyun *
3831*4882a593Smuzhiyun * Returns:
3832*4882a593Smuzhiyun * register value.
3833*4882a593Smuzhiyun */
3834*4882a593Smuzhiyun static u16
qla1280_debounce_register(volatile u16 __iomem * addr)3835*4882a593Smuzhiyun qla1280_debounce_register(volatile u16 __iomem * addr)
3836*4882a593Smuzhiyun {
3837*4882a593Smuzhiyun volatile u16 ret;
3838*4882a593Smuzhiyun volatile u16 ret2;
3839*4882a593Smuzhiyun
3840*4882a593Smuzhiyun ret = RD_REG_WORD(addr);
3841*4882a593Smuzhiyun ret2 = RD_REG_WORD(addr);
3842*4882a593Smuzhiyun
3843*4882a593Smuzhiyun if (ret == ret2)
3844*4882a593Smuzhiyun return ret;
3845*4882a593Smuzhiyun
3846*4882a593Smuzhiyun do {
3847*4882a593Smuzhiyun cpu_relax();
3848*4882a593Smuzhiyun ret = RD_REG_WORD(addr);
3849*4882a593Smuzhiyun ret2 = RD_REG_WORD(addr);
3850*4882a593Smuzhiyun } while (ret != ret2);
3851*4882a593Smuzhiyun
3852*4882a593Smuzhiyun return ret;
3853*4882a593Smuzhiyun }
3854*4882a593Smuzhiyun
3855*4882a593Smuzhiyun
3856*4882a593Smuzhiyun /************************************************************************
3857*4882a593Smuzhiyun * qla1280_check_for_dead_scsi_bus *
3858*4882a593Smuzhiyun * *
3859*4882a593Smuzhiyun * This routine checks for a dead SCSI bus *
3860*4882a593Smuzhiyun ************************************************************************/
3861*4882a593Smuzhiyun #define SET_SXP_BANK 0x0100
3862*4882a593Smuzhiyun #define SCSI_PHASE_INVALID 0x87FF
3863*4882a593Smuzhiyun static int
qla1280_check_for_dead_scsi_bus(struct scsi_qla_host * ha,unsigned int bus)3864*4882a593Smuzhiyun qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3865*4882a593Smuzhiyun {
3866*4882a593Smuzhiyun uint16_t config_reg, scsi_control;
3867*4882a593Smuzhiyun struct device_reg __iomem *reg = ha->iobase;
3868*4882a593Smuzhiyun
3869*4882a593Smuzhiyun if (ha->bus_settings[bus].scsi_bus_dead) {
3870*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC);
3871*4882a593Smuzhiyun config_reg = RD_REG_WORD(®->cfg_1);
3872*4882a593Smuzhiyun WRT_REG_WORD(®->cfg_1, SET_SXP_BANK);
3873*4882a593Smuzhiyun scsi_control = RD_REG_WORD(®->scsiControlPins);
3874*4882a593Smuzhiyun WRT_REG_WORD(®->cfg_1, config_reg);
3875*4882a593Smuzhiyun WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC);
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun if (scsi_control == SCSI_PHASE_INVALID) {
3878*4882a593Smuzhiyun ha->bus_settings[bus].scsi_bus_dead = 1;
3879*4882a593Smuzhiyun return 1; /* bus is dead */
3880*4882a593Smuzhiyun } else {
3881*4882a593Smuzhiyun ha->bus_settings[bus].scsi_bus_dead = 0;
3882*4882a593Smuzhiyun ha->bus_settings[bus].failed_reset_count = 0;
3883*4882a593Smuzhiyun }
3884*4882a593Smuzhiyun }
3885*4882a593Smuzhiyun return 0; /* bus is not dead */
3886*4882a593Smuzhiyun }
3887*4882a593Smuzhiyun
3888*4882a593Smuzhiyun static void
qla1280_get_target_parameters(struct scsi_qla_host * ha,struct scsi_device * device)3889*4882a593Smuzhiyun qla1280_get_target_parameters(struct scsi_qla_host *ha,
3890*4882a593Smuzhiyun struct scsi_device *device)
3891*4882a593Smuzhiyun {
3892*4882a593Smuzhiyun uint16_t mb[MAILBOX_REGISTER_COUNT];
3893*4882a593Smuzhiyun int bus, target, lun;
3894*4882a593Smuzhiyun
3895*4882a593Smuzhiyun bus = device->channel;
3896*4882a593Smuzhiyun target = device->id;
3897*4882a593Smuzhiyun lun = device->lun;
3898*4882a593Smuzhiyun
3899*4882a593Smuzhiyun
3900*4882a593Smuzhiyun mb[0] = MBC_GET_TARGET_PARAMETERS;
3901*4882a593Smuzhiyun mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3902*4882a593Smuzhiyun mb[1] <<= 8;
3903*4882a593Smuzhiyun qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3904*4882a593Smuzhiyun &mb[0]);
3905*4882a593Smuzhiyun
3906*4882a593Smuzhiyun printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3907*4882a593Smuzhiyun
3908*4882a593Smuzhiyun if (mb[3] != 0) {
3909*4882a593Smuzhiyun printk(" Sync: period %d, offset %d",
3910*4882a593Smuzhiyun (mb[3] & 0xff), (mb[3] >> 8));
3911*4882a593Smuzhiyun if (mb[2] & BIT_13)
3912*4882a593Smuzhiyun printk(", Wide");
3913*4882a593Smuzhiyun if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3914*4882a593Smuzhiyun printk(", DT");
3915*4882a593Smuzhiyun } else
3916*4882a593Smuzhiyun printk(" Async");
3917*4882a593Smuzhiyun
3918*4882a593Smuzhiyun if (device->simple_tags)
3919*4882a593Smuzhiyun printk(", Tagged queuing: depth %d", device->queue_depth);
3920*4882a593Smuzhiyun printk("\n");
3921*4882a593Smuzhiyun }
3922*4882a593Smuzhiyun
3923*4882a593Smuzhiyun
3924*4882a593Smuzhiyun #if DEBUG_QLA1280
3925*4882a593Smuzhiyun static void
__qla1280_dump_buffer(char * b,int size)3926*4882a593Smuzhiyun __qla1280_dump_buffer(char *b, int size)
3927*4882a593Smuzhiyun {
3928*4882a593Smuzhiyun int cnt;
3929*4882a593Smuzhiyun u8 c;
3930*4882a593Smuzhiyun
3931*4882a593Smuzhiyun printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
3932*4882a593Smuzhiyun "Bh Ch Dh Eh Fh\n");
3933*4882a593Smuzhiyun printk(KERN_DEBUG "---------------------------------------------"
3934*4882a593Smuzhiyun "------------------\n");
3935*4882a593Smuzhiyun
3936*4882a593Smuzhiyun for (cnt = 0; cnt < size;) {
3937*4882a593Smuzhiyun c = *b++;
3938*4882a593Smuzhiyun
3939*4882a593Smuzhiyun printk("0x%02x", c);
3940*4882a593Smuzhiyun cnt++;
3941*4882a593Smuzhiyun if (!(cnt % 16))
3942*4882a593Smuzhiyun printk("\n");
3943*4882a593Smuzhiyun else
3944*4882a593Smuzhiyun printk(" ");
3945*4882a593Smuzhiyun }
3946*4882a593Smuzhiyun if (cnt % 16)
3947*4882a593Smuzhiyun printk("\n");
3948*4882a593Smuzhiyun }
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun /**************************************************************************
3951*4882a593Smuzhiyun * ql1280_print_scsi_cmd
3952*4882a593Smuzhiyun *
3953*4882a593Smuzhiyun **************************************************************************/
3954*4882a593Smuzhiyun static void
__qla1280_print_scsi_cmd(struct scsi_cmnd * cmd)3955*4882a593Smuzhiyun __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
3956*4882a593Smuzhiyun {
3957*4882a593Smuzhiyun struct scsi_qla_host *ha;
3958*4882a593Smuzhiyun struct Scsi_Host *host = CMD_HOST(cmd);
3959*4882a593Smuzhiyun struct srb *sp;
3960*4882a593Smuzhiyun /* struct scatterlist *sg; */
3961*4882a593Smuzhiyun
3962*4882a593Smuzhiyun int i;
3963*4882a593Smuzhiyun ha = (struct scsi_qla_host *)host->hostdata;
3964*4882a593Smuzhiyun
3965*4882a593Smuzhiyun sp = (struct srb *)CMD_SP(cmd);
3966*4882a593Smuzhiyun printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
3967*4882a593Smuzhiyun printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
3968*4882a593Smuzhiyun SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
3969*4882a593Smuzhiyun CMD_CDBLEN(cmd));
3970*4882a593Smuzhiyun printk(" CDB = ");
3971*4882a593Smuzhiyun for (i = 0; i < cmd->cmd_len; i++) {
3972*4882a593Smuzhiyun printk("0x%02x ", cmd->cmnd[i]);
3973*4882a593Smuzhiyun }
3974*4882a593Smuzhiyun printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
3975*4882a593Smuzhiyun printk(" request buffer=0x%p, request buffer len=0x%x\n",
3976*4882a593Smuzhiyun scsi_sglist(cmd), scsi_bufflen(cmd));
3977*4882a593Smuzhiyun /* if (cmd->use_sg)
3978*4882a593Smuzhiyun {
3979*4882a593Smuzhiyun sg = (struct scatterlist *) cmd->request_buffer;
3980*4882a593Smuzhiyun printk(" SG buffer: \n");
3981*4882a593Smuzhiyun qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
3982*4882a593Smuzhiyun } */
3983*4882a593Smuzhiyun printk(" tag=%d, transfersize=0x%x \n",
3984*4882a593Smuzhiyun cmd->tag, cmd->transfersize);
3985*4882a593Smuzhiyun printk(" SP=0x%p\n", CMD_SP(cmd));
3986*4882a593Smuzhiyun printk(" underflow size = 0x%x, direction=0x%x\n",
3987*4882a593Smuzhiyun cmd->underflow, cmd->sc_data_direction);
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun
3990*4882a593Smuzhiyun /**************************************************************************
3991*4882a593Smuzhiyun * ql1280_dump_device
3992*4882a593Smuzhiyun *
3993*4882a593Smuzhiyun **************************************************************************/
3994*4882a593Smuzhiyun static void
ql1280_dump_device(struct scsi_qla_host * ha)3995*4882a593Smuzhiyun ql1280_dump_device(struct scsi_qla_host *ha)
3996*4882a593Smuzhiyun {
3997*4882a593Smuzhiyun
3998*4882a593Smuzhiyun struct scsi_cmnd *cp;
3999*4882a593Smuzhiyun struct srb *sp;
4000*4882a593Smuzhiyun int i;
4001*4882a593Smuzhiyun
4002*4882a593Smuzhiyun printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4005*4882a593Smuzhiyun if ((sp = ha->outstanding_cmds[i]) == NULL)
4006*4882a593Smuzhiyun continue;
4007*4882a593Smuzhiyun if ((cp = sp->cmd) == NULL)
4008*4882a593Smuzhiyun continue;
4009*4882a593Smuzhiyun qla1280_print_scsi_cmd(1, cp);
4010*4882a593Smuzhiyun }
4011*4882a593Smuzhiyun }
4012*4882a593Smuzhiyun #endif
4013*4882a593Smuzhiyun
4014*4882a593Smuzhiyun
4015*4882a593Smuzhiyun enum tokens {
4016*4882a593Smuzhiyun TOKEN_NVRAM,
4017*4882a593Smuzhiyun TOKEN_SYNC,
4018*4882a593Smuzhiyun TOKEN_WIDE,
4019*4882a593Smuzhiyun TOKEN_PPR,
4020*4882a593Smuzhiyun TOKEN_VERBOSE,
4021*4882a593Smuzhiyun TOKEN_DEBUG,
4022*4882a593Smuzhiyun };
4023*4882a593Smuzhiyun
4024*4882a593Smuzhiyun struct setup_tokens {
4025*4882a593Smuzhiyun char *token;
4026*4882a593Smuzhiyun int val;
4027*4882a593Smuzhiyun };
4028*4882a593Smuzhiyun
4029*4882a593Smuzhiyun static struct setup_tokens setup_token[] __initdata =
4030*4882a593Smuzhiyun {
4031*4882a593Smuzhiyun { "nvram", TOKEN_NVRAM },
4032*4882a593Smuzhiyun { "sync", TOKEN_SYNC },
4033*4882a593Smuzhiyun { "wide", TOKEN_WIDE },
4034*4882a593Smuzhiyun { "ppr", TOKEN_PPR },
4035*4882a593Smuzhiyun { "verbose", TOKEN_VERBOSE },
4036*4882a593Smuzhiyun { "debug", TOKEN_DEBUG },
4037*4882a593Smuzhiyun };
4038*4882a593Smuzhiyun
4039*4882a593Smuzhiyun
4040*4882a593Smuzhiyun /**************************************************************************
4041*4882a593Smuzhiyun * qla1280_setup
4042*4882a593Smuzhiyun *
4043*4882a593Smuzhiyun * Handle boot parameters. This really needs to be changed so one
4044*4882a593Smuzhiyun * can specify per adapter parameters.
4045*4882a593Smuzhiyun **************************************************************************/
4046*4882a593Smuzhiyun static int __init
qla1280_setup(char * s)4047*4882a593Smuzhiyun qla1280_setup(char *s)
4048*4882a593Smuzhiyun {
4049*4882a593Smuzhiyun char *cp, *ptr;
4050*4882a593Smuzhiyun unsigned long val;
4051*4882a593Smuzhiyun int toke;
4052*4882a593Smuzhiyun
4053*4882a593Smuzhiyun cp = s;
4054*4882a593Smuzhiyun
4055*4882a593Smuzhiyun while (cp && (ptr = strchr(cp, ':'))) {
4056*4882a593Smuzhiyun ptr++;
4057*4882a593Smuzhiyun if (!strcmp(ptr, "yes")) {
4058*4882a593Smuzhiyun val = 0x10000;
4059*4882a593Smuzhiyun ptr += 3;
4060*4882a593Smuzhiyun } else if (!strcmp(ptr, "no")) {
4061*4882a593Smuzhiyun val = 0;
4062*4882a593Smuzhiyun ptr += 2;
4063*4882a593Smuzhiyun } else
4064*4882a593Smuzhiyun val = simple_strtoul(ptr, &ptr, 0);
4065*4882a593Smuzhiyun
4066*4882a593Smuzhiyun switch ((toke = qla1280_get_token(cp))) {
4067*4882a593Smuzhiyun case TOKEN_NVRAM:
4068*4882a593Smuzhiyun if (!val)
4069*4882a593Smuzhiyun driver_setup.no_nvram = 1;
4070*4882a593Smuzhiyun break;
4071*4882a593Smuzhiyun case TOKEN_SYNC:
4072*4882a593Smuzhiyun if (!val)
4073*4882a593Smuzhiyun driver_setup.no_sync = 1;
4074*4882a593Smuzhiyun else if (val != 0x10000)
4075*4882a593Smuzhiyun driver_setup.sync_mask = val;
4076*4882a593Smuzhiyun break;
4077*4882a593Smuzhiyun case TOKEN_WIDE:
4078*4882a593Smuzhiyun if (!val)
4079*4882a593Smuzhiyun driver_setup.no_wide = 1;
4080*4882a593Smuzhiyun else if (val != 0x10000)
4081*4882a593Smuzhiyun driver_setup.wide_mask = val;
4082*4882a593Smuzhiyun break;
4083*4882a593Smuzhiyun case TOKEN_PPR:
4084*4882a593Smuzhiyun if (!val)
4085*4882a593Smuzhiyun driver_setup.no_ppr = 1;
4086*4882a593Smuzhiyun else if (val != 0x10000)
4087*4882a593Smuzhiyun driver_setup.ppr_mask = val;
4088*4882a593Smuzhiyun break;
4089*4882a593Smuzhiyun case TOKEN_VERBOSE:
4090*4882a593Smuzhiyun qla1280_verbose = val;
4091*4882a593Smuzhiyun break;
4092*4882a593Smuzhiyun default:
4093*4882a593Smuzhiyun printk(KERN_INFO "qla1280: unknown boot option %s\n",
4094*4882a593Smuzhiyun cp);
4095*4882a593Smuzhiyun }
4096*4882a593Smuzhiyun
4097*4882a593Smuzhiyun cp = strchr(ptr, ';');
4098*4882a593Smuzhiyun if (cp)
4099*4882a593Smuzhiyun cp++;
4100*4882a593Smuzhiyun else {
4101*4882a593Smuzhiyun break;
4102*4882a593Smuzhiyun }
4103*4882a593Smuzhiyun }
4104*4882a593Smuzhiyun return 1;
4105*4882a593Smuzhiyun }
4106*4882a593Smuzhiyun
4107*4882a593Smuzhiyun
4108*4882a593Smuzhiyun static int __init
qla1280_get_token(char * str)4109*4882a593Smuzhiyun qla1280_get_token(char *str)
4110*4882a593Smuzhiyun {
4111*4882a593Smuzhiyun char *sep;
4112*4882a593Smuzhiyun long ret = -1;
4113*4882a593Smuzhiyun int i;
4114*4882a593Smuzhiyun
4115*4882a593Smuzhiyun sep = strchr(str, ':');
4116*4882a593Smuzhiyun
4117*4882a593Smuzhiyun if (sep) {
4118*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4119*4882a593Smuzhiyun if (!strncmp(setup_token[i].token, str, (sep - str))) {
4120*4882a593Smuzhiyun ret = setup_token[i].val;
4121*4882a593Smuzhiyun break;
4122*4882a593Smuzhiyun }
4123*4882a593Smuzhiyun }
4124*4882a593Smuzhiyun }
4125*4882a593Smuzhiyun
4126*4882a593Smuzhiyun return ret;
4127*4882a593Smuzhiyun }
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun
4130*4882a593Smuzhiyun static struct scsi_host_template qla1280_driver_template = {
4131*4882a593Smuzhiyun .module = THIS_MODULE,
4132*4882a593Smuzhiyun .proc_name = "qla1280",
4133*4882a593Smuzhiyun .name = "Qlogic ISP 1280/12160",
4134*4882a593Smuzhiyun .info = qla1280_info,
4135*4882a593Smuzhiyun .slave_configure = qla1280_slave_configure,
4136*4882a593Smuzhiyun .queuecommand = qla1280_queuecommand,
4137*4882a593Smuzhiyun .eh_abort_handler = qla1280_eh_abort,
4138*4882a593Smuzhiyun .eh_device_reset_handler= qla1280_eh_device_reset,
4139*4882a593Smuzhiyun .eh_bus_reset_handler = qla1280_eh_bus_reset,
4140*4882a593Smuzhiyun .eh_host_reset_handler = qla1280_eh_adapter_reset,
4141*4882a593Smuzhiyun .bios_param = qla1280_biosparam,
4142*4882a593Smuzhiyun .can_queue = MAX_OUTSTANDING_COMMANDS,
4143*4882a593Smuzhiyun .this_id = -1,
4144*4882a593Smuzhiyun .sg_tablesize = SG_ALL,
4145*4882a593Smuzhiyun };
4146*4882a593Smuzhiyun
4147*4882a593Smuzhiyun
4148*4882a593Smuzhiyun static int
qla1280_probe_one(struct pci_dev * pdev,const struct pci_device_id * id)4149*4882a593Smuzhiyun qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4150*4882a593Smuzhiyun {
4151*4882a593Smuzhiyun int devnum = id->driver_data;
4152*4882a593Smuzhiyun struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4153*4882a593Smuzhiyun struct Scsi_Host *host;
4154*4882a593Smuzhiyun struct scsi_qla_host *ha;
4155*4882a593Smuzhiyun int error = -ENODEV;
4156*4882a593Smuzhiyun
4157*4882a593Smuzhiyun /* Bypass all AMI SUBSYS VENDOR IDs */
4158*4882a593Smuzhiyun if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4159*4882a593Smuzhiyun printk(KERN_INFO
4160*4882a593Smuzhiyun "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4161*4882a593Smuzhiyun goto error;
4162*4882a593Smuzhiyun }
4163*4882a593Smuzhiyun
4164*4882a593Smuzhiyun printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4165*4882a593Smuzhiyun bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4166*4882a593Smuzhiyun
4167*4882a593Smuzhiyun if (pci_enable_device(pdev)) {
4168*4882a593Smuzhiyun printk(KERN_WARNING
4169*4882a593Smuzhiyun "qla1280: Failed to enabled pci device, aborting.\n");
4170*4882a593Smuzhiyun goto error;
4171*4882a593Smuzhiyun }
4172*4882a593Smuzhiyun
4173*4882a593Smuzhiyun pci_set_master(pdev);
4174*4882a593Smuzhiyun
4175*4882a593Smuzhiyun error = -ENOMEM;
4176*4882a593Smuzhiyun host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4177*4882a593Smuzhiyun if (!host) {
4178*4882a593Smuzhiyun printk(KERN_WARNING
4179*4882a593Smuzhiyun "qla1280: Failed to register host, aborting.\n");
4180*4882a593Smuzhiyun goto error_disable_device;
4181*4882a593Smuzhiyun }
4182*4882a593Smuzhiyun
4183*4882a593Smuzhiyun ha = (struct scsi_qla_host *)host->hostdata;
4184*4882a593Smuzhiyun memset(ha, 0, sizeof(struct scsi_qla_host));
4185*4882a593Smuzhiyun
4186*4882a593Smuzhiyun ha->pdev = pdev;
4187*4882a593Smuzhiyun ha->devnum = devnum; /* specifies microcode load address */
4188*4882a593Smuzhiyun
4189*4882a593Smuzhiyun #ifdef QLA_64BIT_PTR
4190*4882a593Smuzhiyun if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4191*4882a593Smuzhiyun if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4192*4882a593Smuzhiyun printk(KERN_WARNING "scsi(%li): Unable to set a "
4193*4882a593Smuzhiyun "suitable DMA mask - aborting\n", ha->host_no);
4194*4882a593Smuzhiyun error = -ENODEV;
4195*4882a593Smuzhiyun goto error_put_host;
4196*4882a593Smuzhiyun }
4197*4882a593Smuzhiyun } else
4198*4882a593Smuzhiyun dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4199*4882a593Smuzhiyun ha->host_no);
4200*4882a593Smuzhiyun #else
4201*4882a593Smuzhiyun if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4202*4882a593Smuzhiyun printk(KERN_WARNING "scsi(%li): Unable to set a "
4203*4882a593Smuzhiyun "suitable DMA mask - aborting\n", ha->host_no);
4204*4882a593Smuzhiyun error = -ENODEV;
4205*4882a593Smuzhiyun goto error_put_host;
4206*4882a593Smuzhiyun }
4207*4882a593Smuzhiyun #endif
4208*4882a593Smuzhiyun
4209*4882a593Smuzhiyun ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
4210*4882a593Smuzhiyun ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4211*4882a593Smuzhiyun &ha->request_dma, GFP_KERNEL);
4212*4882a593Smuzhiyun if (!ha->request_ring) {
4213*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Failed to get request memory\n");
4214*4882a593Smuzhiyun goto error_put_host;
4215*4882a593Smuzhiyun }
4216*4882a593Smuzhiyun
4217*4882a593Smuzhiyun ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
4218*4882a593Smuzhiyun ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4219*4882a593Smuzhiyun &ha->response_dma, GFP_KERNEL);
4220*4882a593Smuzhiyun if (!ha->response_ring) {
4221*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Failed to get response memory\n");
4222*4882a593Smuzhiyun goto error_free_request_ring;
4223*4882a593Smuzhiyun }
4224*4882a593Smuzhiyun
4225*4882a593Smuzhiyun ha->ports = bdp->numPorts;
4226*4882a593Smuzhiyun
4227*4882a593Smuzhiyun ha->host = host;
4228*4882a593Smuzhiyun ha->host_no = host->host_no;
4229*4882a593Smuzhiyun
4230*4882a593Smuzhiyun host->irq = pdev->irq;
4231*4882a593Smuzhiyun host->max_channel = bdp->numPorts - 1;
4232*4882a593Smuzhiyun host->max_lun = MAX_LUNS - 1;
4233*4882a593Smuzhiyun host->max_id = MAX_TARGETS;
4234*4882a593Smuzhiyun host->max_sectors = 1024;
4235*4882a593Smuzhiyun host->unique_id = host->host_no;
4236*4882a593Smuzhiyun
4237*4882a593Smuzhiyun error = -ENODEV;
4238*4882a593Smuzhiyun
4239*4882a593Smuzhiyun #if MEMORY_MAPPED_IO
4240*4882a593Smuzhiyun ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4241*4882a593Smuzhiyun if (!ha->mmpbase) {
4242*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4243*4882a593Smuzhiyun goto error_free_response_ring;
4244*4882a593Smuzhiyun }
4245*4882a593Smuzhiyun
4246*4882a593Smuzhiyun host->base = (unsigned long)ha->mmpbase;
4247*4882a593Smuzhiyun ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4248*4882a593Smuzhiyun #else
4249*4882a593Smuzhiyun host->io_port = pci_resource_start(ha->pdev, 0);
4250*4882a593Smuzhiyun if (!request_region(host->io_port, 0xff, "qla1280")) {
4251*4882a593Smuzhiyun printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4252*4882a593Smuzhiyun "0x%04lx-0x%04lx - already in use\n",
4253*4882a593Smuzhiyun host->io_port, host->io_port + 0xff);
4254*4882a593Smuzhiyun goto error_free_response_ring;
4255*4882a593Smuzhiyun }
4256*4882a593Smuzhiyun
4257*4882a593Smuzhiyun ha->iobase = (struct device_reg *)host->io_port;
4258*4882a593Smuzhiyun #endif
4259*4882a593Smuzhiyun
4260*4882a593Smuzhiyun INIT_LIST_HEAD(&ha->done_q);
4261*4882a593Smuzhiyun
4262*4882a593Smuzhiyun /* Disable ISP interrupts. */
4263*4882a593Smuzhiyun qla1280_disable_intrs(ha);
4264*4882a593Smuzhiyun
4265*4882a593Smuzhiyun if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4266*4882a593Smuzhiyun "qla1280", ha)) {
4267*4882a593Smuzhiyun printk("qla1280 : Failed to reserve interrupt %d already "
4268*4882a593Smuzhiyun "in use\n", pdev->irq);
4269*4882a593Smuzhiyun goto error_release_region;
4270*4882a593Smuzhiyun }
4271*4882a593Smuzhiyun
4272*4882a593Smuzhiyun /* load the F/W, read paramaters, and init the H/W */
4273*4882a593Smuzhiyun if (qla1280_initialize_adapter(ha)) {
4274*4882a593Smuzhiyun printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4275*4882a593Smuzhiyun goto error_free_irq;
4276*4882a593Smuzhiyun }
4277*4882a593Smuzhiyun
4278*4882a593Smuzhiyun /* set our host ID (need to do something about our two IDs) */
4279*4882a593Smuzhiyun host->this_id = ha->bus_settings[0].id;
4280*4882a593Smuzhiyun
4281*4882a593Smuzhiyun pci_set_drvdata(pdev, host);
4282*4882a593Smuzhiyun
4283*4882a593Smuzhiyun error = scsi_add_host(host, &pdev->dev);
4284*4882a593Smuzhiyun if (error)
4285*4882a593Smuzhiyun goto error_disable_adapter;
4286*4882a593Smuzhiyun scsi_scan_host(host);
4287*4882a593Smuzhiyun
4288*4882a593Smuzhiyun return 0;
4289*4882a593Smuzhiyun
4290*4882a593Smuzhiyun error_disable_adapter:
4291*4882a593Smuzhiyun qla1280_disable_intrs(ha);
4292*4882a593Smuzhiyun error_free_irq:
4293*4882a593Smuzhiyun free_irq(pdev->irq, ha);
4294*4882a593Smuzhiyun error_release_region:
4295*4882a593Smuzhiyun #if MEMORY_MAPPED_IO
4296*4882a593Smuzhiyun iounmap(ha->mmpbase);
4297*4882a593Smuzhiyun #else
4298*4882a593Smuzhiyun release_region(host->io_port, 0xff);
4299*4882a593Smuzhiyun #endif
4300*4882a593Smuzhiyun error_free_response_ring:
4301*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev,
4302*4882a593Smuzhiyun ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4303*4882a593Smuzhiyun ha->response_ring, ha->response_dma);
4304*4882a593Smuzhiyun error_free_request_ring:
4305*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev,
4306*4882a593Smuzhiyun ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4307*4882a593Smuzhiyun ha->request_ring, ha->request_dma);
4308*4882a593Smuzhiyun error_put_host:
4309*4882a593Smuzhiyun scsi_host_put(host);
4310*4882a593Smuzhiyun error_disable_device:
4311*4882a593Smuzhiyun pci_disable_device(pdev);
4312*4882a593Smuzhiyun error:
4313*4882a593Smuzhiyun return error;
4314*4882a593Smuzhiyun }
4315*4882a593Smuzhiyun
4316*4882a593Smuzhiyun
4317*4882a593Smuzhiyun static void
qla1280_remove_one(struct pci_dev * pdev)4318*4882a593Smuzhiyun qla1280_remove_one(struct pci_dev *pdev)
4319*4882a593Smuzhiyun {
4320*4882a593Smuzhiyun struct Scsi_Host *host = pci_get_drvdata(pdev);
4321*4882a593Smuzhiyun struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4322*4882a593Smuzhiyun
4323*4882a593Smuzhiyun scsi_remove_host(host);
4324*4882a593Smuzhiyun
4325*4882a593Smuzhiyun qla1280_disable_intrs(ha);
4326*4882a593Smuzhiyun
4327*4882a593Smuzhiyun free_irq(pdev->irq, ha);
4328*4882a593Smuzhiyun
4329*4882a593Smuzhiyun #if MEMORY_MAPPED_IO
4330*4882a593Smuzhiyun iounmap(ha->mmpbase);
4331*4882a593Smuzhiyun #else
4332*4882a593Smuzhiyun release_region(host->io_port, 0xff);
4333*4882a593Smuzhiyun #endif
4334*4882a593Smuzhiyun
4335*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev,
4336*4882a593Smuzhiyun ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4337*4882a593Smuzhiyun ha->request_ring, ha->request_dma);
4338*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev,
4339*4882a593Smuzhiyun ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4340*4882a593Smuzhiyun ha->response_ring, ha->response_dma);
4341*4882a593Smuzhiyun
4342*4882a593Smuzhiyun pci_disable_device(pdev);
4343*4882a593Smuzhiyun
4344*4882a593Smuzhiyun scsi_host_put(host);
4345*4882a593Smuzhiyun }
4346*4882a593Smuzhiyun
4347*4882a593Smuzhiyun static struct pci_driver qla1280_pci_driver = {
4348*4882a593Smuzhiyun .name = "qla1280",
4349*4882a593Smuzhiyun .id_table = qla1280_pci_tbl,
4350*4882a593Smuzhiyun .probe = qla1280_probe_one,
4351*4882a593Smuzhiyun .remove = qla1280_remove_one,
4352*4882a593Smuzhiyun };
4353*4882a593Smuzhiyun
4354*4882a593Smuzhiyun static int __init
qla1280_init(void)4355*4882a593Smuzhiyun qla1280_init(void)
4356*4882a593Smuzhiyun {
4357*4882a593Smuzhiyun if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4358*4882a593Smuzhiyun printk(KERN_WARNING
4359*4882a593Smuzhiyun "qla1280: struct srb too big, aborting\n");
4360*4882a593Smuzhiyun return -EINVAL;
4361*4882a593Smuzhiyun }
4362*4882a593Smuzhiyun
4363*4882a593Smuzhiyun #ifdef MODULE
4364*4882a593Smuzhiyun /*
4365*4882a593Smuzhiyun * If we are called as a module, the qla1280 pointer may not be null
4366*4882a593Smuzhiyun * and it would point to our bootup string, just like on the lilo
4367*4882a593Smuzhiyun * command line. IF not NULL, then process this config string with
4368*4882a593Smuzhiyun * qla1280_setup
4369*4882a593Smuzhiyun *
4370*4882a593Smuzhiyun * Boot time Options
4371*4882a593Smuzhiyun * To add options at boot time add a line to your lilo.conf file like:
4372*4882a593Smuzhiyun * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
4373*4882a593Smuzhiyun * which will result in the first four devices on the first two
4374*4882a593Smuzhiyun * controllers being set to a tagged queue depth of 32.
4375*4882a593Smuzhiyun */
4376*4882a593Smuzhiyun if (qla1280)
4377*4882a593Smuzhiyun qla1280_setup(qla1280);
4378*4882a593Smuzhiyun #endif
4379*4882a593Smuzhiyun
4380*4882a593Smuzhiyun return pci_register_driver(&qla1280_pci_driver);
4381*4882a593Smuzhiyun }
4382*4882a593Smuzhiyun
4383*4882a593Smuzhiyun static void __exit
qla1280_exit(void)4384*4882a593Smuzhiyun qla1280_exit(void)
4385*4882a593Smuzhiyun {
4386*4882a593Smuzhiyun int i;
4387*4882a593Smuzhiyun
4388*4882a593Smuzhiyun pci_unregister_driver(&qla1280_pci_driver);
4389*4882a593Smuzhiyun /* release any allocated firmware images */
4390*4882a593Smuzhiyun for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4391*4882a593Smuzhiyun release_firmware(qla1280_fw_tbl[i].fw);
4392*4882a593Smuzhiyun qla1280_fw_tbl[i].fw = NULL;
4393*4882a593Smuzhiyun }
4394*4882a593Smuzhiyun }
4395*4882a593Smuzhiyun
4396*4882a593Smuzhiyun module_init(qla1280_init);
4397*4882a593Smuzhiyun module_exit(qla1280_exit);
4398*4882a593Smuzhiyun
4399*4882a593Smuzhiyun MODULE_AUTHOR("Qlogic & Jes Sorensen");
4400*4882a593Smuzhiyun MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4401*4882a593Smuzhiyun MODULE_LICENSE("GPL");
4402*4882a593Smuzhiyun MODULE_FIRMWARE("qlogic/1040.bin");
4403*4882a593Smuzhiyun MODULE_FIRMWARE("qlogic/1280.bin");
4404*4882a593Smuzhiyun MODULE_FIRMWARE("qlogic/12160.bin");
4405*4882a593Smuzhiyun MODULE_VERSION(QLA1280_VERSION);
4406*4882a593Smuzhiyun
4407*4882a593Smuzhiyun /*
4408*4882a593Smuzhiyun * Overrides for Emacs so that we almost follow Linus's tabbing style.
4409*4882a593Smuzhiyun * Emacs will notice this stuff at the end of the file and automatically
4410*4882a593Smuzhiyun * adjust the settings for this buffer only. This must remain at the end
4411*4882a593Smuzhiyun * of the file.
4412*4882a593Smuzhiyun * ---------------------------------------------------------------------------
4413*4882a593Smuzhiyun * Local variables:
4414*4882a593Smuzhiyun * c-basic-offset: 8
4415*4882a593Smuzhiyun * tab-width: 8
4416*4882a593Smuzhiyun * End:
4417*4882a593Smuzhiyun */
4418