1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /***************************************************************************
3*4882a593Smuzhiyun dpti.c - description
4*4882a593Smuzhiyun -------------------
5*4882a593Smuzhiyun begin : Thu Sep 7 2000
6*4882a593Smuzhiyun copyright : (C) 2000 by Adaptec
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun July 30, 2001 First version being submitted
9*4882a593Smuzhiyun for inclusion in the kernel. V2.4
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun See Documentation/scsi/dpti.rst for history, notes, license info
12*4882a593Smuzhiyun and credits
13*4882a593Smuzhiyun ***************************************************************************/
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun /***************************************************************************
16*4882a593Smuzhiyun * *
17*4882a593Smuzhiyun * *
18*4882a593Smuzhiyun ***************************************************************************/
19*4882a593Smuzhiyun /***************************************************************************
20*4882a593Smuzhiyun * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
21*4882a593Smuzhiyun - Support 2.6 kernel and DMA-mapping
22*4882a593Smuzhiyun - ioctl fix for raid tools
23*4882a593Smuzhiyun - use schedule_timeout in long long loop
24*4882a593Smuzhiyun **************************************************************************/
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /*#define DEBUG 1 */
27*4882a593Smuzhiyun /*#define UARTDELAY 1 */
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <linux/module.h>
30*4882a593Smuzhiyun #include <linux/pgtable.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
33*4882a593Smuzhiyun MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun ////////////////////////////////////////////////////////////////
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include <linux/ioctl.h> /* For SCSI-Passthrough */
38*4882a593Smuzhiyun #include <linux/uaccess.h>
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #include <linux/stat.h>
41*4882a593Smuzhiyun #include <linux/slab.h> /* for kmalloc() */
42*4882a593Smuzhiyun #include <linux/pci.h> /* for PCI support */
43*4882a593Smuzhiyun #include <linux/proc_fs.h>
44*4882a593Smuzhiyun #include <linux/blkdev.h>
45*4882a593Smuzhiyun #include <linux/delay.h> /* for udelay */
46*4882a593Smuzhiyun #include <linux/interrupt.h>
47*4882a593Smuzhiyun #include <linux/kernel.h> /* for printk */
48*4882a593Smuzhiyun #include <linux/sched.h>
49*4882a593Smuzhiyun #include <linux/reboot.h>
50*4882a593Smuzhiyun #include <linux/spinlock.h>
51*4882a593Smuzhiyun #include <linux/dma-mapping.h>
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun #include <linux/timer.h>
54*4882a593Smuzhiyun #include <linux/string.h>
55*4882a593Smuzhiyun #include <linux/ioport.h>
56*4882a593Smuzhiyun #include <linux/mutex.h>
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #include <asm/processor.h> /* for boot_cpu_data */
59*4882a593Smuzhiyun #include <asm/io.h> /* for virt_to_bus, etc. */
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun #include <scsi/scsi.h>
62*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
63*4882a593Smuzhiyun #include <scsi/scsi_device.h>
64*4882a593Smuzhiyun #include <scsi/scsi_host.h>
65*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun #include "dpt/dptsig.h"
68*4882a593Smuzhiyun #include "dpti.h"
69*4882a593Smuzhiyun
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);	/* serializes open/ioctl on the control node */

/*
 * Driver signature consumed by DPT/Adaptec management tools.  The
 * processor family/type pair is selected per build architecture; the
 * (-1),(-1) fallback marks "unknown" on other architectures.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun
/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);	/* guards hba_chain walks */

/* I2O system table shared with the IOPs (DMA-visible memory). */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;		/* bus address of sys_tbl */
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;	/* singly-linked list of all adapters */
static int hba_count = 0;		/* number of adapters installed */

static struct class *adpt_sysfs_class;	/* backs /sys class "dpt_i2o" */
113*4882a593Smuzhiyun
static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

/* File operations for the character control node (major DPTI_I2O_MAJOR). */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
128*4882a593Smuzhiyun
/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;		/* completion status filled in by the reply */
	u32 id;			/* matches an IOP reply to this waiter */
	adpt_wait_queue_head_t *wq;	/* where the requester sleeps */
	struct adpt_i2o_post_wait_data *next;	/* next pending waiter */
};

/* List of outstanding synchronous requests; guarded by adpt_post_wait_lock. */
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;	/* next wait id to hand out */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /*============================================================================
146*4882a593Smuzhiyun * Functions
147*4882a593Smuzhiyun *============================================================================
148*4882a593Smuzhiyun */
149*4882a593Smuzhiyun
/* True when this HBA is configured for 64-bit DMA addressing. */
static inline int dpt_dma64(adpt_hba *pHba)
{
	if (sizeof(dma_addr_t) <= 4)
		return 0;
	return pHba->dma64 != 0;
}
154*4882a593Smuzhiyun
dma_high(dma_addr_t addr)155*4882a593Smuzhiyun static inline u32 dma_high(dma_addr_t addr)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun return upper_32_bits(addr);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
dma_low(dma_addr_t addr)160*4882a593Smuzhiyun static inline u32 dma_low(dma_addr_t addr)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun return (u32)addr;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
adpt_read_blink_led(adpt_hba * host)165*4882a593Smuzhiyun static u8 adpt_read_blink_led(adpt_hba* host)
166*4882a593Smuzhiyun {
167*4882a593Smuzhiyun if (host->FwDebugBLEDflag_P) {
168*4882a593Smuzhiyun if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169*4882a593Smuzhiyun return readb(host->FwDebugBLEDvalue_P);
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun return 0;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /*============================================================================
176*4882a593Smuzhiyun * Scsi host template interface functions
177*4882a593Smuzhiyun *============================================================================
178*4882a593Smuzhiyun */
179*4882a593Smuzhiyun
#ifdef MODULE
/* PCI IDs of the supported DPT/Adaptec I2O controllers. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
189*4882a593Smuzhiyun
/*
 * Probe all DPT/Adaptec I2O controllers on the PCI bus and walk the IOPs
 * through the I2O state machine: INIT -> HOLD -> OPERATIONAL.  Registers
 * a SCSI host and a sysfs/chardev control node per working adapter.
 * Returns the number of adapters successfully brought up.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				/* earlier installs (if any) remain usable */
				return hba_count-1;
			}
			/* hold a reference for the driver's lifetime */
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		/* delete_hba unlinks pHba, so remember the successor first */
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If IOP don't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			/* table now stale: restart with the remaining IOPs */
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		/* fetch and parse the logical configuration table (LCT) */
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* class creation failure is non-fatal; only sysfs nodes are lost */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun
/*
 * Tear down one adapter.  Order matters: detach from the SCSI midlayer
 * first, then delete the HBA, and drop the host reference last.
 */
static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
	// adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun
/*
 * Issue a standard SCSI INQUIRY to the adapter itself (via a private
 * I2O_CMD_SCSI_EXEC with the "interpret" bit) and build pHba->detail,
 * the "Vendor/Model/FW" identification string, from the response.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* 80 bytes allocated though only 36 are used for standard INQUIRY data */
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN  (iop<--dev)

	/* message length in dwords depends on the SGE format */
	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;	/* 16-byte CDB occupies 4 message dwords */
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		/* 0xD0000000 = last element + end of buffer flags */
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/*
		 * NOTE(review): on -ETIME/-EINTR the buffer is deliberately
		 * NOT freed — presumably the controller may still DMA into
		 * it after the wait is abandoned; confirm against
		 * adpt_i2o_post_wait()'s timeout handling.
		 */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* detail layout: [0..15] vendor, [16..23] label, [24..39] model,
		 * [40..43] label, [44..47] firmware revision, [48] NUL */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun
adpt_slave_configure(struct scsi_device * device)408*4882a593Smuzhiyun static int adpt_slave_configure(struct scsi_device * device)
409*4882a593Smuzhiyun {
410*4882a593Smuzhiyun struct Scsi_Host *host = device->host;
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun if (host->can_queue && device->tagged_supported) {
413*4882a593Smuzhiyun scsi_change_queue_depth(device,
414*4882a593Smuzhiyun host->can_queue - 1);
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun return 0;
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun
/*
 * queuecommand implementation (called with the host lock held via
 * DEF_SCSI_QCMD).  Translates the midlayer command into an I2O message
 * and posts it to the adapter.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/* adapter-wide reset in progress: ask the midlayer to retry later */
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	/* remember the most recent scsi_device for this target */
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
475*4882a593Smuzhiyun
/* Generates adpt_queue(): wraps adpt_queue_lck with host-lock handling. */
static DEF_SCSI_QCMD(adpt_queue)

/*
 * Report a BIOS-compatible drive geometry (heads/sectors/cylinders)
 * derived from capacity, with a fixed override for CD-ROMs.
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/*
	 * NOTE(review): sector_div() divides capacity in place and returns
	 * the REMAINDER, which is what gets assigned to cylinders here —
	 * looks suspicious but matches the long-standing behavior; confirm
	 * before changing.
	 */
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun
adpt_info(struct Scsi_Host * host)530*4882a593Smuzhiyun static const char *adpt_info(struct Scsi_Host *host)
531*4882a593Smuzhiyun {
532*4882a593Smuzhiyun adpt_hba* pHba;
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun pHba = (adpt_hba *) host->hostdata[0];
535*4882a593Smuzhiyun return (char *) (pHba->detail);
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun
/*
 * /proc seq_file handler: print driver version, adapter identification,
 * queue/fifo sizing, and every attached device per channel/target/lun.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	/* each device[id] heads a linked list of LUNs (next_lun) */
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
584*4882a593Smuzhiyun
/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	/* a 32-bit pointer fits directly in the I2O context field */
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	/* on 64-bit, hand out an index into a small per-HBA slot table */
	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		/* table exhausted: caller must treat (u32)-1 as failure */
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
614*4882a593Smuzhiyun
/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	/*
	 * NOTE(review): slot is read and cleared without taking
	 * host_lock (unlike adpt_ioctl_to_context) — presumably safe
	 * because each context is consumed exactly once; confirm.
	 */
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun /*===========================================================================
631*4882a593Smuzhiyun * Error Handling routines
632*4882a593Smuzhiyun *===========================================================================
633*4882a593Smuzhiyun */
634*4882a593Smuzhiyun
/*
 * SCSI error-handler abort: send an I2O_CMD_SCSI_ABORT for the given
 * command and wait synchronously for the adapter's verdict.  Returns
 * SUCCESS or FAILED per the midlayer's eh contract.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	/* post_wait sleeps, so hold the host lock around it explicitly */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun #define I2O_DEVICE_RESET 0x27
675*4882a593Smuzhiyun // This is the same for BLK and SCSI devices
676*4882a593Smuzhiyun // NOTE this is wrong in the i2o.h definitions
677*4882a593Smuzhiyun // This is not currently supported by our adapter but we issue it anyway
adpt_device_reset(struct scsi_cmnd * cmd)678*4882a593Smuzhiyun static int adpt_device_reset(struct scsi_cmnd* cmd)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun adpt_hba* pHba;
681*4882a593Smuzhiyun u32 msg[4];
682*4882a593Smuzhiyun u32 rcode;
683*4882a593Smuzhiyun int old_state;
684*4882a593Smuzhiyun struct adpt_device* d = cmd->device->hostdata;
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun pHba = (void*) cmd->device->host->hostdata[0];
687*4882a593Smuzhiyun printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
688*4882a593Smuzhiyun if (!d) {
689*4882a593Smuzhiyun printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
690*4882a593Smuzhiyun return FAILED;
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun memset(msg, 0, sizeof(msg));
693*4882a593Smuzhiyun msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
694*4882a593Smuzhiyun msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
695*4882a593Smuzhiyun msg[2] = 0;
696*4882a593Smuzhiyun msg[3] = 0;
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun if (pHba->host)
699*4882a593Smuzhiyun spin_lock_irq(pHba->host->host_lock);
700*4882a593Smuzhiyun old_state = d->state;
701*4882a593Smuzhiyun d->state |= DPTI_DEV_RESET;
702*4882a593Smuzhiyun rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
703*4882a593Smuzhiyun d->state = old_state;
704*4882a593Smuzhiyun if (pHba->host)
705*4882a593Smuzhiyun spin_unlock_irq(pHba->host->host_lock);
706*4882a593Smuzhiyun if (rcode != 0) {
707*4882a593Smuzhiyun if(rcode == -EOPNOTSUPP ){
708*4882a593Smuzhiyun printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
709*4882a593Smuzhiyun return FAILED;
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
712*4882a593Smuzhiyun return FAILED;
713*4882a593Smuzhiyun } else {
714*4882a593Smuzhiyun printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
715*4882a593Smuzhiyun return SUCCESS;
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun }
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun #define I2O_HBA_BUS_RESET 0x87
721*4882a593Smuzhiyun // This version of bus reset is called by the eh_error handler
adpt_bus_reset(struct scsi_cmnd * cmd)722*4882a593Smuzhiyun static int adpt_bus_reset(struct scsi_cmnd* cmd)
723*4882a593Smuzhiyun {
724*4882a593Smuzhiyun adpt_hba* pHba;
725*4882a593Smuzhiyun u32 msg[4];
726*4882a593Smuzhiyun u32 rcode;
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun pHba = (adpt_hba*)cmd->device->host->hostdata[0];
729*4882a593Smuzhiyun memset(msg, 0, sizeof(msg));
730*4882a593Smuzhiyun printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
731*4882a593Smuzhiyun msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
732*4882a593Smuzhiyun msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
733*4882a593Smuzhiyun msg[2] = 0;
734*4882a593Smuzhiyun msg[3] = 0;
735*4882a593Smuzhiyun if (pHba->host)
736*4882a593Smuzhiyun spin_lock_irq(pHba->host->host_lock);
737*4882a593Smuzhiyun rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
738*4882a593Smuzhiyun if (pHba->host)
739*4882a593Smuzhiyun spin_unlock_irq(pHba->host->host_lock);
740*4882a593Smuzhiyun if (rcode != 0) {
741*4882a593Smuzhiyun printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
742*4882a593Smuzhiyun return FAILED;
743*4882a593Smuzhiyun } else {
744*4882a593Smuzhiyun printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
745*4882a593Smuzhiyun return SUCCESS;
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun }
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun // This version of reset is called by the eh_error_handler
__adpt_reset(struct scsi_cmnd * cmd)750*4882a593Smuzhiyun static int __adpt_reset(struct scsi_cmnd* cmd)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun adpt_hba* pHba;
753*4882a593Smuzhiyun int rcode;
754*4882a593Smuzhiyun char name[32];
755*4882a593Smuzhiyun
756*4882a593Smuzhiyun pHba = (adpt_hba*)cmd->device->host->hostdata[0];
757*4882a593Smuzhiyun strncpy(name, pHba->name, sizeof(name));
758*4882a593Smuzhiyun printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
759*4882a593Smuzhiyun rcode = adpt_hba_reset(pHba);
760*4882a593Smuzhiyun if(rcode == 0){
761*4882a593Smuzhiyun printk(KERN_WARNING"%s: HBA reset complete\n", name);
762*4882a593Smuzhiyun return SUCCESS;
763*4882a593Smuzhiyun } else {
764*4882a593Smuzhiyun printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
765*4882a593Smuzhiyun return FAILED;
766*4882a593Smuzhiyun }
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun
adpt_reset(struct scsi_cmnd * cmd)769*4882a593Smuzhiyun static int adpt_reset(struct scsi_cmnd* cmd)
770*4882a593Smuzhiyun {
771*4882a593Smuzhiyun int rc;
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun spin_lock_irq(cmd->device->host->host_lock);
774*4882a593Smuzhiyun rc = __adpt_reset(cmd);
775*4882a593Smuzhiyun spin_unlock_irq(cmd->device->host->host_lock);
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun return rc;
778*4882a593Smuzhiyun }
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
adpt_hba_reset(adpt_hba * pHba)781*4882a593Smuzhiyun static int adpt_hba_reset(adpt_hba* pHba)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun int rcode;
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun pHba->state |= DPTI_STATE_RESET;
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun // Activate does get status , init outbound, and get hrt
788*4882a593Smuzhiyun if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
789*4882a593Smuzhiyun printk(KERN_ERR "%s: Could not activate\n", pHba->name);
790*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
791*4882a593Smuzhiyun return rcode;
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun if ((rcode=adpt_i2o_build_sys_table()) < 0) {
795*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
796*4882a593Smuzhiyun return rcode;
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun PDEBUG("%s: in HOLD state\n",pHba->name);
799*4882a593Smuzhiyun
800*4882a593Smuzhiyun if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
801*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
802*4882a593Smuzhiyun return rcode;
803*4882a593Smuzhiyun }
804*4882a593Smuzhiyun PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
807*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
808*4882a593Smuzhiyun return rcode;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
812*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
813*4882a593Smuzhiyun return rcode;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun pHba->state &= ~DPTI_STATE_RESET;
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun scsi_host_complete_all_commands(pHba->host, DID_RESET);
818*4882a593Smuzhiyun return 0; /* return success */
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun /*===========================================================================
822*4882a593Smuzhiyun *
823*4882a593Smuzhiyun *===========================================================================
824*4882a593Smuzhiyun */
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun
adpt_i2o_sys_shutdown(void)827*4882a593Smuzhiyun static void adpt_i2o_sys_shutdown(void)
828*4882a593Smuzhiyun {
829*4882a593Smuzhiyun adpt_hba *pHba, *pNext;
830*4882a593Smuzhiyun struct adpt_i2o_post_wait_data *p1, *old;
831*4882a593Smuzhiyun
832*4882a593Smuzhiyun printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
833*4882a593Smuzhiyun printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
834*4882a593Smuzhiyun /* Delete all IOPs from the controller chain */
835*4882a593Smuzhiyun /* They should have already been released by the
836*4882a593Smuzhiyun * scsi-core
837*4882a593Smuzhiyun */
838*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = pNext) {
839*4882a593Smuzhiyun pNext = pHba->next;
840*4882a593Smuzhiyun adpt_i2o_delete_hba(pHba);
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun /* Remove any timedout entries from the wait queue. */
844*4882a593Smuzhiyun // spin_lock_irqsave(&adpt_post_wait_lock, flags);
845*4882a593Smuzhiyun /* Nothing should be outstanding at this point so just
846*4882a593Smuzhiyun * free them
847*4882a593Smuzhiyun */
848*4882a593Smuzhiyun for(p1 = adpt_post_wait_queue; p1;) {
849*4882a593Smuzhiyun old = p1;
850*4882a593Smuzhiyun p1 = p1->next;
851*4882a593Smuzhiyun kfree(old);
852*4882a593Smuzhiyun }
853*4882a593Smuzhiyun // spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
854*4882a593Smuzhiyun adpt_post_wait_queue = NULL;
855*4882a593Smuzhiyun
856*4882a593Smuzhiyun printk(KERN_INFO "Adaptec I2O controllers down.\n");
857*4882a593Smuzhiyun }
858*4882a593Smuzhiyun
/*
 * Probe-time setup for one adapter: enable the PCI device, pick the DMA
 * mask, map the message unit BAR(s), allocate and chain a new adpt_hba,
 * and hook up the interrupt handler.
 *
 * Two hardware layouts are handled: standard cards use BAR0 for both the
 * register block and the message frames; "Raptor" split-BAR cards put the
 * message frames behind BAR1.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released.
 */
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	/* Split-BAR cards keep the message frames behind BAR1 */
	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	/* Append the new HBA to the global chain under the config mutex */
	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	/* Fixed register offsets within the mapped message unit */
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	/* On IRQ failure, delete_hba unwinds the chain entry and mappings */
	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun
/*
 * Tear down one adapter: unlink it from the global HBA chain, release its
 * IRQ, unmap the BARs, free every DMA-coherent buffer (HRT, LCT, status
 * block, reply pool) and all per-device bookkeeping, then free pHba
 * itself. When the last HBA goes away the char device and sysfs class
 * are unregistered as well.
 *
 * Called from install-failure paths, reset-failure paths and shutdown.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink pHba from the singly linked hba_chain (p2 trails p1) */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	/* Split-BAR cards have a second, separate mapping to drop */
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the flat i2o_device list ... */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* ... and every adpt_device LUN chain in the channel/id tables */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* Last adapter gone: drop the char device and sysfs class too */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1115*4882a593Smuzhiyun
/*
 * Look up the adpt_device for a (channel, id, lun) address.
 *
 * The first device at channel[chan].device[id] anchors a linked list of
 * LUNs (next_lun); the first match on scsi_lun is returned.
 *
 * Returns NULL if the address is out of range, unpopulated (tid == 0),
 * or no LUN matches.
 */
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;
	/* id indexes the fixed-size device[] table; the original code only
	   range-checked chan, leaving an out-of-bounds read possible here */
	if (id >= MAX_ID)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun
/*
 * Post an I2O message and sleep until the reply arrives (or the timeout
 * expires). A wait_data record carrying a 15-bit id is pushed onto the
 * global adpt_post_wait_queue; the id is stamped into msg[2] so the ISR
 * (adpt_i2o_post_wait_complete) can find and wake this waiter.
 *
 * Callers that hold the host lock rely on it being dropped across the
 * sleep and retaken afterwards (see the spin_unlock/lock_irq pair below).
 *
 * Returns the reply status, 0 on success, -ETIME if the I/O was issued
 * but no reply arrived in time, -ETIMEDOUT if posting itself timed out,
 * -EOPNOTSUPP if the IOP rejected the function, or -ENOMEM.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	/* ids wrap at 15 bits */
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;	/* default until the ISR overwrites it */

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Top bit flags the context as a post-wait id for the reply handler */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resorces is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  (p2 trails p1 for the unlink) */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun
/*
 * Low-level message post: poll the inbound post port for a free message
 * frame offset (up to 30s), copy the message into the frame, then write
 * the offset back to the post port to hand it to the IOP.
 *
 * Returns 0 on success or -ETIMEDOUT if no frame became available.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);	/* EMPTY_QUEUE = no frame free yet */
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	/* m is an offset into the message unit's MMIO window */
	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();	/* make sure the frame is complete before posting it */

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun
/*
 * Reply-side half of the post-wait protocol: given the 15-bit context id
 * stamped into the outgoing message, find the matching waiter on
 * adpt_post_wait_queue, store the reply status and wake it.
 *
 * If no waiter matches, the sleeper already timed out and gave up; all we
 * can do is log the orphaned reply for debugging. The queue lock is held
 * only while walking/updating the list, and is dropped before waking the
 * sleeper.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *entry = NULL;

	context &= 0x7fff;	/* strip the post-wait flag bit, keep the id */

	spin_lock(&adpt_post_wait_lock);
	for (entry = adpt_post_wait_queue; entry; entry = entry->next) {
		if (entry->id != context)
			continue;
		entry->status = status;
		spin_unlock(&adpt_post_wait_lock);
		wake_up_interruptible(entry->wq);
		return;
	}
	spin_unlock(&adpt_post_wait_lock);

	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for (entry = adpt_post_wait_queue; entry; entry = entry->next)
		printk(KERN_DEBUG"           %d\n", entry->id);
	return;
}
1297*4882a593Smuzhiyun
/*
 * Reset the I2O controller (IOP).
 *
 * Claims a free inbound message frame, posts an I2O_CMD_ADAPTER_RESET
 * request whose 4-byte completion status is DMA-written by the
 * controller into a coherent buffer, then polls that buffer until the
 * reset finishes or the timeout expires.
 *
 * Returns 0 on success, -ETIMEDOUT or -ENOMEM on failure.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Poll the inbound post port until the IOP hands us a free
	   message frame offset (EMPTY_QUEUE means none available). */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	/* 4-byte buffer the controller DMAs its reset status into. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		/* Give the claimed frame back to the IOP before bailing. */
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	/* Copy the request into the claimed frame and post it; the wmb()
	   barriers keep the frame contents visible before the doorbell. */
	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Wait for the controller to write a non-zero status byte. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	/* Re-read the status block to learn the post-reset IOP state. */
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
1399*4882a593Smuzhiyun
1400*4882a593Smuzhiyun
/*
 * Parse the controller's Logical Configuration Table (LCT) and build
 * the driver's device tables.
 *
 * Pass 1: register every visible block/SCSI/FC peripheral as an
 *         i2o_device; for hidden entries (user_tid == 0xfff) only
 *         track the maximum channel/id/lun so upper layers can still
 *         address them later (arrays disassembled, hotspares, JBOD).
 * Pass 2: record bus-adapter/FC ports into pHba->channel[].
 * Pass 3: allocate an adpt_device per peripheral, chained per
 *         (channel, id) with one node per LUN.
 *
 * Fix: the "SCSI ID out of range" warning previously printed bus_no
 * instead of the offending scsi_id.
 *
 * Returns 0 on success, -1 if no LCT is present, or -ENOMEM.
 */
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in 32-bit words: 3 words of header, 9 per entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				/* Report the offending ID (was bus_no). */
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Additional LUNs chain off the first device. */
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun * Each I2O controller has a chain of devices on it - these match
1569*4882a593Smuzhiyun * the useful parts of the LCT of the board.
1570*4882a593Smuzhiyun */
1571*4882a593Smuzhiyun
/*
 * Link a freshly created i2o_device onto the head of the HBA's
 * device list, under the global configuration mutex.
 * Always returns 0.
 */
static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	struct i2o_device *head;

	mutex_lock(&adpt_configuration_lock);

	d->controller = pHba;
	d->owner = NULL;
	d->prev = NULL;

	/* Push at the front of the doubly linked list. */
	head = pHba->devices;
	d->next = head;
	if (head)
		head->prev = d;
	pHba->devices = d;

	/* No name assigned yet. */
	d->dev_name[0] = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
1588*4882a593Smuzhiyun
adpt_open(struct inode * inode,struct file * file)1589*4882a593Smuzhiyun static int adpt_open(struct inode *inode, struct file *file)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun int minor;
1592*4882a593Smuzhiyun adpt_hba* pHba;
1593*4882a593Smuzhiyun
1594*4882a593Smuzhiyun mutex_lock(&adpt_mutex);
1595*4882a593Smuzhiyun //TODO check for root access
1596*4882a593Smuzhiyun //
1597*4882a593Smuzhiyun minor = iminor(inode);
1598*4882a593Smuzhiyun if (minor >= hba_count) {
1599*4882a593Smuzhiyun mutex_unlock(&adpt_mutex);
1600*4882a593Smuzhiyun return -ENXIO;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun mutex_lock(&adpt_configuration_lock);
1603*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = pHba->next) {
1604*4882a593Smuzhiyun if (pHba->unit == minor) {
1605*4882a593Smuzhiyun break; /* found adapter */
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun if (pHba == NULL) {
1609*4882a593Smuzhiyun mutex_unlock(&adpt_configuration_lock);
1610*4882a593Smuzhiyun mutex_unlock(&adpt_mutex);
1611*4882a593Smuzhiyun return -ENXIO;
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun // if(pHba->in_use){
1615*4882a593Smuzhiyun // mutex_unlock(&adpt_configuration_lock);
1616*4882a593Smuzhiyun // return -EBUSY;
1617*4882a593Smuzhiyun // }
1618*4882a593Smuzhiyun
1619*4882a593Smuzhiyun pHba->in_use = 1;
1620*4882a593Smuzhiyun mutex_unlock(&adpt_configuration_lock);
1621*4882a593Smuzhiyun mutex_unlock(&adpt_mutex);
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun return 0;
1624*4882a593Smuzhiyun }
1625*4882a593Smuzhiyun
adpt_close(struct inode * inode,struct file * file)1626*4882a593Smuzhiyun static int adpt_close(struct inode *inode, struct file *file)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun int minor;
1629*4882a593Smuzhiyun adpt_hba* pHba;
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun minor = iminor(inode);
1632*4882a593Smuzhiyun if (minor >= hba_count) {
1633*4882a593Smuzhiyun return -ENXIO;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun mutex_lock(&adpt_configuration_lock);
1636*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = pHba->next) {
1637*4882a593Smuzhiyun if (pHba->unit == minor) {
1638*4882a593Smuzhiyun break; /* found adapter */
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun mutex_unlock(&adpt_configuration_lock);
1642*4882a593Smuzhiyun if (pHba == NULL) {
1643*4882a593Smuzhiyun return -ENXIO;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun pHba->in_use = 0;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun return 0;
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun
1651*4882a593Smuzhiyun
/*
 * I2OUSRCMD ioctl: pass a user-supplied I2O message through to the
 * controller.
 *
 * The user frame may carry a scatter/gather list; each simple SG
 * element is given a kernel DMA bounce buffer, data is copied in
 * (for "to device" elements) before posting and copied back out
 * (for "from device" elements) afterwards.  The reply frame is
 * returned to user space with the user's original context words
 * restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	/* Reply frame follows the message frame in the user buffer. */
	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	/* Clamp the reply copy to one reply frame. */
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	/* Overwrite the user's context words with our own so the reply
	   can be routed back to this ioctl; restored before copy-out. */
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			/* Only simple-address elements are supported. */
			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from enterring the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);	/* retry on controller timeout */

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* On -ETIME/-EINTR the controller may still DMA into the bounce
	   buffers, so they are deliberately leaked rather than freed. */
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun #if defined __ia64__
/* Fill in the IA-64 processor type for the DPT management tools. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// managmenent utility requires it
	si->processorType = PROC_IA64;
}
1861*4882a593Smuzhiyun #endif
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun #if defined __sparc__
/* Fill in the SPARC processor type for the DPT management tools. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// managmenent utility requires it
	si->processorType = PROC_ULTRASPARC;
}
1871*4882a593Smuzhiyun #endif
1872*4882a593Smuzhiyun #if defined __alpha__
/* Fill in the Alpha processor type for the DPT management tools. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// managmenent utility requires it
	si->processorType = PROC_ALPHA;
}
1880*4882a593Smuzhiyun #endif
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun #if defined __i386__
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun #include <uapi/asm/vm86.h>
1885*4882a593Smuzhiyun
/*
 * Map the detected x86 family to the DPT management processor code.
 * Anything newer than a 486 (including unknown families) is reported
 * as a Pentium, matching the original behavior.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	if (boot_cpu_data.x86 == CPU_386)
		si->processorType = PROC_386;
	else if (boot_cpu_data.x86 == CPU_486)
		si->processorType = PROC_486;
	else
		si->processorType = PROC_PENTIUM;
}
1906*4882a593Smuzhiyun #endif
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun /*
1909*4882a593Smuzhiyun * This routine returns information about the system. This does not effect
1910*4882a593Smuzhiyun * any logic and if the info is wrong - it doesn't matter.
1911*4882a593Smuzhiyun */
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun /* Get all the info we can not get from kernel services */
/*
 * Fill in a sysInfo_S describing the host system for the DPT
 * management tools and copy it to the user buffer.  The values are
 * informational only; wrong data here affects no driver logic.
 * Returns 0 or -EFAULT.
 */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	/* Zero the whole struct (including padding) so no kernel stack
	   bytes leak to user space via copy_to_user() below. */
	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

/* Per-architecture processor type; 0xff means "unknown". */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1945*4882a593Smuzhiyun
/*
 * Main ioctl dispatcher for the DPT control device.
 *
 * Maps the minor number to an HBA, waits out any in-progress reset,
 * then handles the DPT management commands: signature query, I2O
 * passthrough, controller/system info, blink-LED state, reset and
 * rescan.  Returns 0 or a negative errno.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Busy-wait (sleeping in 2-jiffy steps) until any in-progress
	   reset of this adapter has cleared. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases:
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		/* Reset under the host lock so no command slips in. */
		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2031*4882a593Smuzhiyun
/*
 * Unlocked ioctl entry point.
 *
 * The legacy adpt_ioctl() handler predates the removal of the BKL, so
 * all ioctl traffic is serialized through adpt_mutex before forwarding.
 */
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	long rc;

	mutex_lock(&adpt_mutex);
	rc = adpt_ioctl(file_inode(file), file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return rc;
}
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
compat_adpt_ioctl(struct file * file,unsigned int cmd,unsigned long arg)2047*4882a593Smuzhiyun static long compat_adpt_ioctl(struct file *file,
2048*4882a593Smuzhiyun unsigned int cmd, unsigned long arg)
2049*4882a593Smuzhiyun {
2050*4882a593Smuzhiyun struct inode *inode;
2051*4882a593Smuzhiyun long ret;
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun inode = file_inode(file);
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun mutex_lock(&adpt_mutex);
2056*4882a593Smuzhiyun
2057*4882a593Smuzhiyun switch(cmd) {
2058*4882a593Smuzhiyun case DPT_SIGNATURE:
2059*4882a593Smuzhiyun case I2OUSRCMD:
2060*4882a593Smuzhiyun case DPT_CTRLINFO:
2061*4882a593Smuzhiyun case DPT_SYSINFO:
2062*4882a593Smuzhiyun case DPT_BLINKLED:
2063*4882a593Smuzhiyun case I2ORESETCMD:
2064*4882a593Smuzhiyun case I2ORESCANCMD:
2065*4882a593Smuzhiyun case (DPT_TARGET_BUSY & 0xFFFF):
2066*4882a593Smuzhiyun case DPT_TARGET_BUSY:
2067*4882a593Smuzhiyun ret = adpt_ioctl(inode, file, cmd, arg);
2068*4882a593Smuzhiyun break;
2069*4882a593Smuzhiyun default:
2070*4882a593Smuzhiyun ret = -ENOIOCTLCMD;
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun
2073*4882a593Smuzhiyun mutex_unlock(&adpt_mutex);
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun return ret;
2076*4882a593Smuzhiyun }
2077*4882a593Smuzhiyun #endif
2078*4882a593Smuzhiyun
/*
 * Interrupt handler: drain the controller's outbound reply FIFO.
 *
 * Each reply frame is dispatched on its initiator-context word
 * (offset 8): bit 30 marks an ioctl passthru reply (copied out for the
 * waiter), bit 31 marks a post-wait message (completed via
 * adpt_i2o_post_wait_complete()), otherwise it is a SCSI command reply
 * finished through adpt_i2o_scsi_complete().  Every consumed MFA is
 * handed back to the controller by writing it to the reply port.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	/* Loop while the controller still signals a pending reply */
	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/*
		 * m is the bus address of the reply frame; translate it back
		 * to a kernel virtual address via the reply pool mapping.
		 */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/*
			 * Failed message: recover the transaction context from
			 * the preserved original MFA (reply+28), patch it into
			 * the reply frame, and release the stuck inbound frame
			 * with a NOP so the controller can reuse it.
			 */
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			/* Sanity check: a SCSI tag should never appear here */
			if(!(context & 0x40000000)) {
				/*
				 * The request tag is one less than the command tag
				 * as the firmware might treat a 0 tag as invalid
				 */
				cmd = scsi_host_find_tag(pHba->host,
							 readl(reply + 12) - 1);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_scsi_complete(reply, cmd);
			}
		}
		/* Return the MFA to the controller's free list */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2184*4882a593Smuzhiyun
/*
 * Translate a SCSI command into a DPT-private I2O_CMD_SCSI_EXEC message
 * and post it to the controller.
 *
 * Message layout (32-bit words): [0] size/SGL-offset header (filled in
 * last), [1] function/TID, [3] tag+1 as transaction context, [4..5] DPT
 * private command/TID, [6] SCB flags + CDB length, [7..10] 16-byte CDB
 * block, then the byte count and the scatter/gather list (64-bit SGEs
 * when dpt_dma64() says the HBA supports them).
 *
 * Returns 0 on success, otherwise the error from adpt_i2o_post_this().
 * Unsupported data directions complete the command immediately with
 * INITIATOR_ERROR and still return 0.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[3] = cmd->request->tag + 1;
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			/* lptr remembers this SGE so the last one can be
			 * rewritten with the end-of-list flag below */
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2306*4882a593Smuzhiyun
2307*4882a593Smuzhiyun
adpt_scsi_host_alloc(adpt_hba * pHba,struct scsi_host_template * sht)2308*4882a593Smuzhiyun static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun struct Scsi_Host *host;
2311*4882a593Smuzhiyun
2312*4882a593Smuzhiyun host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2313*4882a593Smuzhiyun if (host == NULL) {
2314*4882a593Smuzhiyun printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2315*4882a593Smuzhiyun return -1;
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun host->hostdata[0] = (unsigned long)pHba;
2318*4882a593Smuzhiyun pHba->host = host;
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun host->irq = pHba->pDev->irq;
2321*4882a593Smuzhiyun /* no IO ports, so don't have to set host->io_port and
2322*4882a593Smuzhiyun * host->n_io_port
2323*4882a593Smuzhiyun */
2324*4882a593Smuzhiyun host->io_port = 0;
2325*4882a593Smuzhiyun host->n_io_port = 0;
2326*4882a593Smuzhiyun /* see comments in scsi_host.h */
2327*4882a593Smuzhiyun host->max_id = 16;
2328*4882a593Smuzhiyun host->max_lun = 256;
2329*4882a593Smuzhiyun host->max_channel = pHba->top_scsi_channel + 1;
2330*4882a593Smuzhiyun host->cmd_per_lun = 1;
2331*4882a593Smuzhiyun host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2332*4882a593Smuzhiyun host->sg_tablesize = pHba->sg_tablesize;
2333*4882a593Smuzhiyun host->can_queue = pHba->post_fifo_size;
2334*4882a593Smuzhiyun
2335*4882a593Smuzhiyun return 0;
2336*4882a593Smuzhiyun }
2337*4882a593Smuzhiyun
2338*4882a593Smuzhiyun
/*
 * Complete a SCSI command from its I2O reply frame.
 *
 * Maps the I2O detailed status (reply+16) to a SCSI mid-layer result
 * code, sets the residual from the actual transfer count (reply+20),
 * copies sense data out of the reply on CHECK CONDITION, and finally
 * invokes cmd->scsi_done().  A frame flagged MSG_FAIL means the
 * controller rejected the message outright; it is reported as a
 * timeout so the mid-layer will retry.
 */
static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* All remaining detailed statuses collapse to DID_ERROR */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			/* Reply frame carries at most 40 bytes of sense data */
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condtion we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limitted number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	/* Merge the device status byte into the mid-layer result */
	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
}
2458*4882a593Smuzhiyun
2459*4882a593Smuzhiyun
adpt_rescan(adpt_hba * pHba)2460*4882a593Smuzhiyun static s32 adpt_rescan(adpt_hba* pHba)
2461*4882a593Smuzhiyun {
2462*4882a593Smuzhiyun s32 rcode;
2463*4882a593Smuzhiyun ulong flags = 0;
2464*4882a593Smuzhiyun
2465*4882a593Smuzhiyun if(pHba->host)
2466*4882a593Smuzhiyun spin_lock_irqsave(pHba->host->host_lock, flags);
2467*4882a593Smuzhiyun if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2468*4882a593Smuzhiyun goto out;
2469*4882a593Smuzhiyun if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2470*4882a593Smuzhiyun goto out;
2471*4882a593Smuzhiyun rcode = 0;
2472*4882a593Smuzhiyun out: if(pHba->host)
2473*4882a593Smuzhiyun spin_unlock_irqrestore(pHba->host->host_lock, flags);
2474*4882a593Smuzhiyun return rcode;
2475*4882a593Smuzhiyun }
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun
/*
 * Resynchronize the driver's per-channel device tables with a freshly
 * fetched LCT (pHba->lct):
 *   - every known device is first marked DPTI_DEV_UNSCANNED;
 *   - LCT entries that match an existing device are marked back online
 *     (and their TID refreshed if the controller renumbered them);
 *   - entries with no matching device get new i2o_device/adpt_device
 *     structures allocated and linked into the channel tables;
 *   - devices still unscanned after the walk are set offline.
 * Returns 0 on success, -1 on a missing LCT, -ENOMEM on allocation
 * failure.  Caller holds the host lock (see adpt_rescan()), hence the
 * GFP_ATOMIC allocations.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* Entry count: (table_size - 3 header words) / 9 words per entry */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* Only entries claimed by the host (user_tid 0xfff) matter */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* Query group 0x8000 for bus/id/lun of this TID */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				/* Link a new adpt_device at the head slot or
				 * onto the end of the per-id LUN chain */
				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2643*4882a593Smuzhiyun
2644*4882a593Smuzhiyun /*============================================================================
2645*4882a593Smuzhiyun * Routines from i2o subsystem
2646*4882a593Smuzhiyun *============================================================================
2647*4882a593Smuzhiyun */
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun
2651*4882a593Smuzhiyun /*
2652*4882a593Smuzhiyun * Bring an I2O controller into HOLD state. See the spec.
2653*4882a593Smuzhiyun */
/*
 * Bring an I2O controller into HOLD state. See the spec.
 *
 * Already-initialized controllers are probed for their current IOP
 * state and reset back to RESET if they are in any post-reset state
 * (READY/OPERATIONAL/HOLD/FAILED); uninitialized ones are reset
 * unconditionally.  Then the outbound queue is initialized and the HRT
 * fetched, which per the I2O spec leaves the IOP in HOLD.
 * Returns 0 on success, a reset error code, or -1.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* If the IOP no longer answers a status request, force a
		 * reset and re-check before proceeding */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_hba_reset(pHba);	/* NOTE(review): original calls adpt_i2o_reset_hba here */
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun /*
2707*4882a593Smuzhiyun * Bring a controller online into OPERATIONAL state.
2708*4882a593Smuzhiyun */
2709*4882a593Smuzhiyun
/*
 * Bring a controller online into OPERATIONAL state.
 *
 * Sends the system table (HOLD -> READY), then enables the HBA
 * (READY -> OPERATIONAL).  Short-circuit evaluation preserves the
 * required call order.  Returns 0 on success, -1 on failure.
 */
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0 ||	/* leaves IOP in READY state */
	    adpt_i2o_enable_hba(pHba) < 0)	/* leaves IOP OPERATIONAL */
		return -1;

	return 0;
}
2722*4882a593Smuzhiyun
/*
 * Post a UTIL_NOP message to the IOP using inbound message frame 'm'.
 * If m == EMPTY_QUEUE, first poll the post port (up to 5 seconds) for a
 * free frame.  Used to hand a claimed-but-unneeded frame back to the IOP.
 * Returns 0 on success, 2 if no frame became available in time.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;	/* give up after 5 seconds */

	/* Poll the inbound FIFO until the IOP offers a message frame. */
	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);	/* don't busy-wait */
	}
	/* Build a minimal three-word NOP message inside the frame. */
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();	/* frame contents must be visible before posting */

	/* Posting the frame offset hands it back to the IOP. */
	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2750*4882a593Smuzhiyun
/*
 * Initialize the IOP's outbound (reply) queue.
 *
 * Claims an inbound message frame, sends I2O_CMD_OUTBOUND_INIT with a
 * 4-byte DMA status buffer, waits for the IOP to report completion, then
 * allocates the reply frame pool and stocks the reply FIFO with the
 * physical address of every frame.  Returns 0 on success or a negative
 * error code.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Claim an inbound message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);	/* don't busy-wait */
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte DMA buffer the IOP writes its init status byte into */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		/* Return the claimed frame so the IOP doesn't lose it. */
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Replace any existing reply pool before allocating a new one. */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}

	/* Hand every reply frame's bus address to the IOP's reply FIFO. */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun
2847*4882a593Smuzhiyun /*
2848*4882a593Smuzhiyun * I2O System Table. Contains information about
2849*4882a593Smuzhiyun * all the IOPs in the system. Used to inform IOPs
2850*4882a593Smuzhiyun * about each other's existence.
2851*4882a593Smuzhiyun *
2852*4882a593Smuzhiyun * sys_tbl_ver is the CurrentChangeIndicator that is
2853*4882a593Smuzhiyun * used by IOPs to track changes.
2854*4882a593Smuzhiyun */
2855*4882a593Smuzhiyun
2856*4882a593Smuzhiyun
2857*4882a593Smuzhiyun
/*
 * Fetch the IOP status block via I2O_CMD_STATUS_GET.
 *
 * Allocates the DMA-coherent status block on first use, posts a
 * StatusGet message and polls until the IOP has DMA'd the block back.
 * On success, derives the inbound/outbound FIFO depths and the
 * scatter-gather table size from the returned block.
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	/* Lazily allocate the 88-byte status block (reused on later calls). */
	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);	/* don't busy-wait */
	} while(m==EMPTY_QUEUE);

	
	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	/* 64-bit bus address of the status block, low word first */
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/*
	 * The block was zeroed above; poll until its last byte reads 0xff,
	 * which indicates the IOP has finished DMA-ing the status block.
	 */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	/* 64-bit DMA needs wider SG elements plus the extra header words. */
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			- 14 * sizeof(u32))
			/ (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			- 12 * sizeof(u32))
			/ sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun /*
2980*4882a593Smuzhiyun * Get the IOP's Logical Configuration Table
2981*4882a593Smuzhiyun */
/*
 * Get the IOP's Logical Configuration Table.
 *
 * Allocates (or grows) the DMA-coherent LCT buffer and asks the IOP to
 * DMA the table into it via I2O_CMD_LCT_NOTIFY.  If the IOP reports a
 * table larger than the buffer, the buffer is reallocated and the
 * request retried.  On success the firmware debug buffer pointers are
 * also mapped from the I2O_DPT_EXEC_IOP_BUFFERS scalar group.
 * Returns 0 on success or a negative error code.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;	/* Simple SG LE, EOB */
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			/* balanced ')' to match the HRT-get message style */
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x)\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/*
		 * table_size is in 32-bit words.  If the IOP needs more room,
		 * free the old buffer with its ORIGINAL size (dma_free_coherent
		 * must be given the allocation size), then grow and retry.
		 */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			int newsize = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
			pHba->lct_size = newsize;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		/* buf[0] = debug buffer offset, buf[1] = size, buf[2] = print
		   buffer offset within it (per the pointer setup below) */
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}
3051*4882a593Smuzhiyun
/*
 * (Re)build the global I2O system table describing every registered HBA.
 *
 * Frees any previous table, allocates a new DMA-coherent one sized for
 * hba_count entries, and fills one entry per HBA from a fresh status
 * block.  HBAs whose status cannot be read are skipped (num_entries is
 * decremented).  Returns 0 on success or -ENOMEM.
 *
 * NOTE(review): pHba starts as the head of hba_chain and its device is
 * used for the DMA allocations; this presumes at least one HBA is
 * registered when called — confirm against callers.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Drop the previous table before building a new one. */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
		return -ENOMEM;
	}

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one	
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;	/* IOP ids 0/1 are reserved */
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* inbound message port lives at BAR base + 0x40 */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
			count, table[count]);
	}
}
#endif

	return 0;
}
3112*4882a593Smuzhiyun
3113*4882a593Smuzhiyun
3114*4882a593Smuzhiyun /*
3115*4882a593Smuzhiyun * Dump the information block associated with a given unit (TID)
3116*4882a593Smuzhiyun */
3117*4882a593Smuzhiyun
/*
 * Dump the information block associated with a given unit (TID)
 *
 * Queries scalar group 0xF100 for the identity strings — fields 3, 4
 * and 6 are printed as Vendor, Device and Rev respectively — and, on
 * DEBUG builds, prints the class name and LCT device flags.
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];	/* large enough for the 16-byte replies + header slack */
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;	/* force NUL termination of the 16-byte field */
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;	/* revision field is only 8 bytes */
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	 printk(KERN_INFO "\tFlags: ");

	 if(d->lct_data.device_flags&(1<<0))
		  printk("C");	     // ConfigDialog requested
	 if(d->lct_data.device_flags&(1<<1))
		  printk("U");	     // Multi-user capable
	 if(!(d->lct_data.device_flags&(1<<4)))
		  printk("P");	     // Peer service enabled!
	 if(!(d->lct_data.device_flags&(1<<5)))
		  printk("M");	     // Mgmt service enabled!
	 printk("\n");
#endif
}
3156*4882a593Smuzhiyun
3157*4882a593Smuzhiyun #ifdef DEBUG
3158*4882a593Smuzhiyun /*
3159*4882a593Smuzhiyun * Do i2o class name lookup
3160*4882a593Smuzhiyun */
adpt_i2o_get_class_name(int class)3161*4882a593Smuzhiyun static const char *adpt_i2o_get_class_name(int class)
3162*4882a593Smuzhiyun {
3163*4882a593Smuzhiyun int idx = 16;
3164*4882a593Smuzhiyun static char *i2o_class_name[] = {
3165*4882a593Smuzhiyun "Executive",
3166*4882a593Smuzhiyun "Device Driver Module",
3167*4882a593Smuzhiyun "Block Device",
3168*4882a593Smuzhiyun "Tape Device",
3169*4882a593Smuzhiyun "LAN Interface",
3170*4882a593Smuzhiyun "WAN Interface",
3171*4882a593Smuzhiyun "Fibre Channel Port",
3172*4882a593Smuzhiyun "Fibre Channel Device",
3173*4882a593Smuzhiyun "SCSI Device",
3174*4882a593Smuzhiyun "ATE Port",
3175*4882a593Smuzhiyun "ATE Device",
3176*4882a593Smuzhiyun "Floppy Controller",
3177*4882a593Smuzhiyun "Floppy Device",
3178*4882a593Smuzhiyun "Secondary Bus Port",
3179*4882a593Smuzhiyun "Peer Transport Agent",
3180*4882a593Smuzhiyun "Peer Transport",
3181*4882a593Smuzhiyun "Unknown"
3182*4882a593Smuzhiyun };
3183*4882a593Smuzhiyun
3184*4882a593Smuzhiyun switch(class&0xFFF) {
3185*4882a593Smuzhiyun case I2O_CLASS_EXECUTIVE:
3186*4882a593Smuzhiyun idx = 0; break;
3187*4882a593Smuzhiyun case I2O_CLASS_DDM:
3188*4882a593Smuzhiyun idx = 1; break;
3189*4882a593Smuzhiyun case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3190*4882a593Smuzhiyun idx = 2; break;
3191*4882a593Smuzhiyun case I2O_CLASS_SEQUENTIAL_STORAGE:
3192*4882a593Smuzhiyun idx = 3; break;
3193*4882a593Smuzhiyun case I2O_CLASS_LAN:
3194*4882a593Smuzhiyun idx = 4; break;
3195*4882a593Smuzhiyun case I2O_CLASS_WAN:
3196*4882a593Smuzhiyun idx = 5; break;
3197*4882a593Smuzhiyun case I2O_CLASS_FIBRE_CHANNEL_PORT:
3198*4882a593Smuzhiyun idx = 6; break;
3199*4882a593Smuzhiyun case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3200*4882a593Smuzhiyun idx = 7; break;
3201*4882a593Smuzhiyun case I2O_CLASS_SCSI_PERIPHERAL:
3202*4882a593Smuzhiyun idx = 8; break;
3203*4882a593Smuzhiyun case I2O_CLASS_ATE_PORT:
3204*4882a593Smuzhiyun idx = 9; break;
3205*4882a593Smuzhiyun case I2O_CLASS_ATE_PERIPHERAL:
3206*4882a593Smuzhiyun idx = 10; break;
3207*4882a593Smuzhiyun case I2O_CLASS_FLOPPY_CONTROLLER:
3208*4882a593Smuzhiyun idx = 11; break;
3209*4882a593Smuzhiyun case I2O_CLASS_FLOPPY_DEVICE:
3210*4882a593Smuzhiyun idx = 12; break;
3211*4882a593Smuzhiyun case I2O_CLASS_BUS_ADAPTER_PORT:
3212*4882a593Smuzhiyun idx = 13; break;
3213*4882a593Smuzhiyun case I2O_CLASS_PEER_TRANSPORT_AGENT:
3214*4882a593Smuzhiyun idx = 14; break;
3215*4882a593Smuzhiyun case I2O_CLASS_PEER_TRANSPORT:
3216*4882a593Smuzhiyun idx = 15; break;
3217*4882a593Smuzhiyun }
3218*4882a593Smuzhiyun return i2o_class_name[idx];
3219*4882a593Smuzhiyun }
3220*4882a593Smuzhiyun #endif
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun
/*
 * Fetch the Hardware Resource Table (HRT) from the IOP.
 *
 * Starts with a default-sized DMA buffer; if the IOP reports more
 * entries than fit, the buffer is freed (with its original size),
 * regrown, and the request is retried until the whole table fits.
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* entry_len is in 32-bit words; grow the buffer and retry
		 * if the IOP's table is larger than what we allocated. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3260*4882a593Smuzhiyun
3261*4882a593Smuzhiyun /*
3262*4882a593Smuzhiyun * Query one scalar group value or a whole scalar group.
3263*4882a593Smuzhiyun */
/*
 * Query one scalar group value or a whole scalar group.
 *
 * tid/group/field select the parameter; field == -1 requests the whole
 * group.  The result (minus the 8-byte UtilParamsGet reply header) is
 * copied into 'buf'.  Returns buflen on success or a negative error
 * code.  Cleanup of the result block is consolidated on a single exit
 * path (goto) to avoid the previous triplicated dma_free_coherent().
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		size = -ENOMEM;
		goto out_free_resblk;
	}
	if (field == -1)  		/* whole group */
			opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		goto out_free_resblk;
	} else if (size == -EINTR) {
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		goto out_free_resblk;
	}

	memcpy(buf, resblk_va+8, buflen);  /* cut off header */

	/* a non-negative size means success; report the caller's length */
	if (size >= 0)
		size = buflen;

out_free_resblk:
	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
	return size;
}
3321*4882a593Smuzhiyun
3322*4882a593Smuzhiyun
3323*4882a593Smuzhiyun /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3324*4882a593Smuzhiyun *
3325*4882a593Smuzhiyun * This function can be used for all UtilParamsGet/Set operations.
3326*4882a593Smuzhiyun * The OperationBlock is given in opblk-buffer,
3327*4882a593Smuzhiyun * and results are returned in resblk-buffer.
3328*4882a593Smuzhiyun * Note that the minimum sized resblk is 8 bytes and contains
3329*4882a593Smuzhiyun * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3330*4882a593Smuzhiyun */
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 *
 * Returns the number of result bytes used (header included) on success,
 * a negative post_wait status, or -BlockStatus if the IOP rejected the
 * parameter block.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9]; 
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
   		return wait_status; 	/* -DetailedStatus */
	}

	/* res[1]: ErrorInfoSize (31:24), BlockStatus (23:16), BlockSize (15:0) */
	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",   
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	/* BlockSize is in 32-bit words; add 4 for the leading ResultCount word */
	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
}
3366*4882a593Smuzhiyun
3367*4882a593Smuzhiyun
adpt_i2o_quiesce_hba(adpt_hba * pHba)3368*4882a593Smuzhiyun static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3369*4882a593Smuzhiyun {
3370*4882a593Smuzhiyun u32 msg[4];
3371*4882a593Smuzhiyun int ret;
3372*4882a593Smuzhiyun
3373*4882a593Smuzhiyun adpt_i2o_status_get(pHba);
3374*4882a593Smuzhiyun
3375*4882a593Smuzhiyun /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3376*4882a593Smuzhiyun
3377*4882a593Smuzhiyun if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3378*4882a593Smuzhiyun (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3379*4882a593Smuzhiyun return 0;
3380*4882a593Smuzhiyun }
3381*4882a593Smuzhiyun
3382*4882a593Smuzhiyun msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3383*4882a593Smuzhiyun msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3384*4882a593Smuzhiyun msg[2] = 0;
3385*4882a593Smuzhiyun msg[3] = 0;
3386*4882a593Smuzhiyun
3387*4882a593Smuzhiyun if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3388*4882a593Smuzhiyun printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3389*4882a593Smuzhiyun pHba->unit, -ret);
3390*4882a593Smuzhiyun } else {
3391*4882a593Smuzhiyun printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3392*4882a593Smuzhiyun }
3393*4882a593Smuzhiyun
3394*4882a593Smuzhiyun adpt_i2o_status_get(pHba);
3395*4882a593Smuzhiyun return ret;
3396*4882a593Smuzhiyun }
3397*4882a593Smuzhiyun
3398*4882a593Smuzhiyun
3399*4882a593Smuzhiyun /*
3400*4882a593Smuzhiyun * Enable IOP. Allows the IOP to resume external operations.
3401*4882a593Smuzhiyun */
/*
 * Enable IOP. Allows the IOP to resume external operations.
 *
 * Returns 0 if the IOP is enabled (or already OPERATIONAL), -ENOMEM if
 * no status block is available, -EINVAL if the IOP is not in READY
 * state, or the negative status from adpt_i2o_post_wait() on failure.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	/* Refresh the cached status block so the state checks are current */
	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;	/* already running — nothing to do */

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	/* Four-word SYS_ENABLE message, no scatter/gather list */
	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	/* generous wait (240, presumably seconds) — enable can be slow */
	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	/* Re-read status so callers observe the post-enable IOP state */
	adpt_i2o_status_get(pHba);
	return ret;
}
3433*4882a593Smuzhiyun
3434*4882a593Smuzhiyun
/*
 * Send the I2O system table to the IOP via SYS_TAB_SET.
 *
 * The message references the file-global DMA buffer (sys_tbl_pa /
 * sys_tbl_len) built elsewhere, plus two empty declarations for
 * private memory and private I/O space.
 *
 * Returns 0 on success or the negative status from adpt_i2o_post_wait().
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;	/* empty private-memory declaration */
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;	/* empty private-I/O declaration */
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}
3471*4882a593Smuzhiyun
3472*4882a593Smuzhiyun
3473*4882a593Smuzhiyun /*============================================================================
3474*4882a593Smuzhiyun *
3475*4882a593Smuzhiyun *============================================================================
3476*4882a593Smuzhiyun */
3477*4882a593Smuzhiyun
3478*4882a593Smuzhiyun
3479*4882a593Smuzhiyun #ifdef UARTDELAY
3480*4882a593Smuzhiyun
/*
 * Busy-wait for roughly @millisec milliseconds, one udelay(1000) per
 * iteration.  Only built for UARTDELAY debug configurations.
 *
 * Fix: the declaration read "static static void", a duplicate
 * storage-class specifier that is a hard compile error whenever
 * UARTDELAY is defined.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3488*4882a593Smuzhiyun
3489*4882a593Smuzhiyun #endif
3490*4882a593Smuzhiyun
3491*4882a593Smuzhiyun static struct scsi_host_template driver_template = {
3492*4882a593Smuzhiyun .module = THIS_MODULE,
3493*4882a593Smuzhiyun .name = "dpt_i2o",
3494*4882a593Smuzhiyun .proc_name = "dpt_i2o",
3495*4882a593Smuzhiyun .show_info = adpt_show_info,
3496*4882a593Smuzhiyun .info = adpt_info,
3497*4882a593Smuzhiyun .queuecommand = adpt_queue,
3498*4882a593Smuzhiyun .eh_abort_handler = adpt_abort,
3499*4882a593Smuzhiyun .eh_device_reset_handler = adpt_device_reset,
3500*4882a593Smuzhiyun .eh_bus_reset_handler = adpt_bus_reset,
3501*4882a593Smuzhiyun .eh_host_reset_handler = adpt_reset,
3502*4882a593Smuzhiyun .bios_param = adpt_bios_param,
3503*4882a593Smuzhiyun .slave_configure = adpt_slave_configure,
3504*4882a593Smuzhiyun .can_queue = MAX_TO_IOP_MESSAGES,
3505*4882a593Smuzhiyun .this_id = 7,
3506*4882a593Smuzhiyun };
3507*4882a593Smuzhiyun
adpt_init(void)3508*4882a593Smuzhiyun static int __init adpt_init(void)
3509*4882a593Smuzhiyun {
3510*4882a593Smuzhiyun int error;
3511*4882a593Smuzhiyun adpt_hba *pHba, *next;
3512*4882a593Smuzhiyun
3513*4882a593Smuzhiyun printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3514*4882a593Smuzhiyun
3515*4882a593Smuzhiyun error = adpt_detect(&driver_template);
3516*4882a593Smuzhiyun if (error < 0)
3517*4882a593Smuzhiyun return error;
3518*4882a593Smuzhiyun if (hba_chain == NULL)
3519*4882a593Smuzhiyun return -ENODEV;
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = pHba->next) {
3522*4882a593Smuzhiyun error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3523*4882a593Smuzhiyun if (error)
3524*4882a593Smuzhiyun goto fail;
3525*4882a593Smuzhiyun scsi_scan_host(pHba->host);
3526*4882a593Smuzhiyun }
3527*4882a593Smuzhiyun return 0;
3528*4882a593Smuzhiyun fail:
3529*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = next) {
3530*4882a593Smuzhiyun next = pHba->next;
3531*4882a593Smuzhiyun scsi_remove_host(pHba->host);
3532*4882a593Smuzhiyun }
3533*4882a593Smuzhiyun return error;
3534*4882a593Smuzhiyun }
3535*4882a593Smuzhiyun
adpt_exit(void)3536*4882a593Smuzhiyun static void __exit adpt_exit(void)
3537*4882a593Smuzhiyun {
3538*4882a593Smuzhiyun adpt_hba *pHba, *next;
3539*4882a593Smuzhiyun
3540*4882a593Smuzhiyun for (pHba = hba_chain; pHba; pHba = next) {
3541*4882a593Smuzhiyun next = pHba->next;
3542*4882a593Smuzhiyun adpt_release(pHba);
3543*4882a593Smuzhiyun }
3544*4882a593Smuzhiyun }
3545*4882a593Smuzhiyun
3546*4882a593Smuzhiyun module_init(adpt_init);
3547*4882a593Smuzhiyun module_exit(adpt_exit);
3548*4882a593Smuzhiyun
3549*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3550