// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines required for FSA host/adapter
 *    communication.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 * fib_map_alloc - allocate the fib objects
 * @dev: Adapter to allocate for
 *
 * Allocate and map the shared PCI space for the FIB blocks used to
 * talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;

	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
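
/*
 * Worked example of the allocation size above (illustrative numbers,
 * not taken from any real adapter): with max_cmd_size = 2048,
 * sizeof(struct aac_fib_xporthdr) = 64, can_queue = 256 and
 * AAC_NUM_MGT_FIB = 8, the request is
 *
 *	(2048 + 64) * (256 + 8) + (ALIGN32 - 1) = 557568 + 31 bytes
 *
 * The ALIGN32 - 1 slack lets aac_fib_setup() round the returned DMA
 * address up to a 32-byte boundary without running past the buffer.
 */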

/**
 * aac_fib_map_free - free the fib objects
 * @dev: Adapter to free
 *
 * Free the PCI mappings and the memory allocated for FIB blocks
 * on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}

void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
	    i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	    i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		    (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
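
/*
 * Illustrative mapping with made-up numbers: if max_msix = 4,
 * can_queue + AAC_NUM_MGT_FIB = 10 and vector_cap = 4, fibs 0..5 get
 * vectors 1, 2, 3, 1, 2, 3 (round-robin over 1..max_msix - 1), while
 * the last vector_cap fibs (6..9) fall back to vector 0.
 */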

/**
 * aac_fib_setup - setup the fibs
 * @dev: Adapter to set up
 *
 * Allocate the PCI space for the fibs, map it and then initialise the
 * fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev *dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
		(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
		sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 * Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
	    i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	    i++, fibptr++)
	{
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		init_completion(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 * Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 * Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 * Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}
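
/*
 * Resulting layout of each slot initialised above (repeated
 * can_queue + AAC_NUM_MGT_FIB times after the initial 32-byte
 * alignment pad):
 *
 *	+---------------------------+  <- slot start
 *	| struct aac_fib_xporthdr   |
 *	+---------------------------+  <- fibptr->hw_fib_va / hw_fib_pa
 *	| hw fib or native command  |
 *	| (dev->max_cmd_size bytes) |
 *	+---------------------------+  <- next slot
 */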

/**
 * aac_fib_alloc_tag - allocate a fib using tags
 * @dev: Adapter to allocate the fib for
 * @scmd: SCSI command
 *
 * Allocate a fib from the adapter fib pool using tags
 * from the blk layer.
 */

struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;
	fibptr->flags = 0;

	return fibptr;
}
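
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the block layer tag picks the fib directly, so the I/O fast path
 * needs no lock or free-list traversal:
 *
 *	struct fib *fib = aac_fib_alloc_tag(dev, scmd);
 *
 *	aac_fib_init(fib);
 *	// ... build the request of 'size' bytes in fib->hw_fib_va ...
 *	status = aac_fib_send(ContainerCommand, fib, size,
 *			      FsaNormal, 0, 1, my_callback, scmd);
 */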

/**
 * aac_fib_alloc - allocate a fib
 * @dev: Adapter to allocate the fib for
 *
 * Allocate a fib from the adapter fib pool. If the pool is empty we
 * return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib *fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 * Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
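
/*
 * Pool-based lifecycle sketch (hypothetical caller): unlike the tag
 * path above, fibs taken from the free list must be handed back with
 * aac_fib_free() once aac_fib_complete() has run:
 *
 *	struct fib *fib = aac_fib_alloc(dev);
 *
 *	if (!fib)
 *		return -ENOMEM;
 *	aac_fib_init(fib);
 *	// ... fill in and send the request, wait for the reply ...
 *	aac_fib_complete(fib);
 *	aac_fib_free(fib);
 */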

/**
 * aac_fib_free - free a fib
 * @fibptr: fib to free up
 *
 * Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
	    fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
		       (void *)fibptr,
		       le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 * aac_fib_init - initialise a fib
 * @fibptr: The fib to initialize
 *
 * Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 * fib_dealloc - deallocate a fib
 * @fibptr: fib to deallocate
 *
 * Will deallocate and return to the free pool the FIB pointed to by the
 * caller.
 */

static void fib_dealloc(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.XferState = 0;
}

/*
 * Communication primitives define and support the queuing method we use
 * to support host to adapter communication. All queue accesses happen
 * through these routines and these are the only routines which have
 * knowledge of how these queues are implemented.
 */

/**
 * aac_get_entry - get a queue entry
 * @dev: Adapter
 * @qid: Queue Number
 * @entry: Entry return
 * @index: Index return
 * @nonotify: notification control
 *
 * For the requested queue the routine returns a queue entry if the
 * queue has free entries. If the queue is full (no free entries) then
 * no entry is returned and the function returns 0; otherwise 1 is
 * returned.
 */

static int aac_get_entry(struct aac_dev *dev, u32 qid,
			 struct aac_entry **entry, u32 *index,
			 unsigned long *nonotify)
{
	struct aac_queue *q;
	unsigned long idx;

	/*
	 * All of the queues wrap when they reach the end, so we check
	 * to see if they have reached the end and if they have we just
	 * set the index back to zero. This is a wrap. You could or off
	 * the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0;	/* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0;	/* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
		       qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
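
/*
 * Worked example with made-up index values: on a queue of 8 entries
 * with consumer = 2 and producer = 8, the producer index wraps to 0
 * and aac_get_entry() hands back q->base + 0. Had the producer been 1,
 * (*index + 1) == 2 would match the consumer index and the queue would
 * be reported full.
 */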

/**
 * aac_queue_get - get the next free QE
 * @dev: Adapter
 * @index: Returned index
 * @qid: Queue number
 * @hw_fib: Fib to associate with the queue entry
 * @wait: Wait if queue full
 * @fibptr: Driver fib object to go with fib
 * @nonotify: Don't notify the adapter
 *
 * Gets the next free QE off the requested priority adapter command
 * queue and associates the Fib with the QE. The QE represented by
 * index is ready to insert on the queue when this routine returns
 * success.
 */

int aac_queue_get(struct aac_dev *dev, u32 *index, u32 qid,
		  struct hw_fib *hw_fib, int wait, struct fib *fibptr,
		  unsigned long *nonotify)
{
	struct aac_entry *entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore the adapter's pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 * If MapFib is true then we need to map the Fib and put pointers
	 * in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
 */

/**
 * aac_fib_send - send a fib to the adapter
 * @command: Command to send
 * @fibptr: The fib
 * @size: Size of fib data area
 * @priority: Priority of Fib
 * @wait: Async/sync select
 * @reply: True if a reply is wanted
 * @callback: Called with reply
 * @callback_data: Passed to callback
 *
 * Sends the requested FIB to the adapter and optionally will wait for a
 * response FIB. If the caller does not wish to wait for a response then
 * an event to wait on must be supplied. This event will be set when a
 * response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		 int priority, int wait, int reply, fib_callback callback,
		 void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 * There are 5 cases with the wait and response requested flags.
	 * The only invalid cases are if the caller requests to wait and
	 * does not request a response and if the caller does not want a
	 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
	 * routine when the response comes back from the adapter. No
	 * further processing will be done besides deleting the Fib. We
	 * will have a debug mode where the adapter can notify the host
	 * it had a problem and the host can log that fact.
	 */
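	/*
	 * Summary of the (wait, reply) combinations handled below,
	 * derived from the code that follows:
	 *
	 *	wait  reply  XferState flags added       counter
	 *	 0     0     NoResponseExpected          NoResponseSent
	 *	 0     1     Async | ResponseExpected    AsyncSent
	 *	 1     1     ResponseExpected            NormalSent
	 *	 1     0     rejected with -EINVAL
	 */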
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 * Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and notify
	 * the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG " hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG " fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {

		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
			       dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}


	/*
	 * If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (!try_wait_for_completion(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (unlikely(aac_pci_offline(dev)))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * wait_for_completion_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 * If the user does not want a response then return success otherwise
	 * return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
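
/*
 * Minimal synchronous-send sketch (hypothetical caller): a positive
 * wait with reply = 1 blocks until the adapter answers, so no callback
 * is supplied:
 *
 *	status = aac_fib_send(ContainerCommand, fib,
 *			      sizeof(struct aac_query_mount),
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	if (status == 0)
 *		; // the response payload is now valid in fib->hw_fib_va
 */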

int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		 void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
		fibptr->hw_fib_va;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;


	hbacmd->iu_type = command;

	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
	} else
		return -EINVAL;


	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {

		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(aac_pci_offline(dev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (wait_for_completion_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}

	return -EINPROGRESS;
}
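
/*
 * Async sketch for the native HBA path (hypothetical caller and
 * callback): supplying a callback makes the call return -EINPROGRESS
 * immediately and the callback later runs with callback_data:
 *
 *	status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
 *			      my_scsi_done_cb, scmd);
 *	if (status != -EINPROGRESS)
 *		; // delivery failed, or the IU type was rejected
 */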

/**
 * aac_consumer_get - get the top of the queue
 * @dev: Adapter
 * @q: Queue
 * @entry: Return entry
 *
 * Will return a pointer to the entry on the top of the queue requested that
 * we are a consumer of, and return the address of the queue entry. It does
 * not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev *dev, struct aac_queue *q,
		     struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 * The consumer index must be wrapped if we have reached
		 * the end of the queue, else we just use the entry
		 * pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return (status);
}

/**
 * aac_consumer_free - free consumer entry
 * @dev: Adapter
 * @q: Queue
 * @qid: Queue ident
 *
 * Frees up the current top of the queue we are a consumer of. If the
 * queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev *dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
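
/*
 * Wrap example (made-up values): with q->entries = 8 and the consumer
 * header currently at 8, the update above stores 1 rather than
 * incrementing to 9; otherwise the consumer index simply advances by
 * one.
 */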

/**
 * aac_fib_adapter_complete - complete adapter issued fib
 * @fibptr: fib to complete
 * @size: size of fib
 *
 * Will do all necessary work to complete a FIB that was sent from
 * the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	struct aac_dev *dev = fibptr->dev;
	struct aac_queue *q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 * This block handles the case where the adapter had sent us a
	 * command and we have finished processing the command. We
	 * call completeFib when we are done processing the command
	 * and want to send a response back to the adapter. This will
	 * send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;

			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
		       "Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 * aac_fib_complete - fib completion handler
 * @fibptr: FIB to complete
 *
 * Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 * Check for a fib which has already been completed or with a
	 * status wait timeout
	 */

	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 * If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;
	/*
	 * This block completes a cdb which originated on the host and we
	 * just need to deallocate the cdb or reinit it. At this point the
	 * command is complete that we had sent to the adapter and this
	 * cdb could be reused.
	 */

	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
	    (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 * This handles the case when the host has aborted the I/O
		 * to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 * aac_printf - handle printf from firmware
 * @dev: Adapter
 * @val: Message info
 *
 * Print a message passed to us by the controller firmware on the
 * Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;

	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 * The size of the printfbuf is set in port.c
		 * There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
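
/*
 * Encoding example (made-up value): val = 0x00010020 carries
 * level = (val >> 16) & 0xffff = 1 and length = val & 0xffff = 32,
 * i.e. a 32-byte message in dev->printfbuf at log level 1.
 */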

static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}


static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
				 aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
				 aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}

#define AIF_SNIFF_TIMEOUT	(500*HZ)
/**
 * aac_handle_aif - Handle a message from the firmware
 * @dev: Which adapter this fib is from
 * @fibptr: Pointer to fibptr from adapter
 *
 * This routine handles a driver notify fib from the adapter and
 * dispatches it to the appropriate routine for handling.
 */
static void aac_handle_aif(struct aac_dev *dev, struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd *aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 * We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifRawDeviceRemove:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed = DELETE;
			break;
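
		/*
		 * Decode example for the raw-remove payload above
		 * (made-up value): 0x01020005 splits into
		 * channel = 1 (bits 24-27), lun = 2 (bits 16-23) and
		 * id = 5 (bits 0-15); any bits set above bit 27 make
		 * the event invalid.
		 */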
1111*4882a593Smuzhiyun
		/*
		 * Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 * Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 * Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 * Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 * Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
				(((__le32 *)aifcmd->data)[0] ==
				cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel,
					id,
					lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 * In JBOD mode, suppress automatic exposure of a new
			 * physical target until it has been configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
			case EM_SES_DRIVE_INSERTION:
			case EM_SES_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/*
					 * Fall back to legacy dev_t packing:
					 * channel in bits 12-7, lun in bits
					 * 6-4 and id in bits 3-0.
					 */
					if ((0x2000 <= id) || lun || channel ||
					    ((channel = (id >> 7) & 0x3F) >=
					    dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
					((((__le32 *)aifcmd->data)[3]
					    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
					(((__le32 *)aifcmd->data)[3]
					    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
					ADD : DELETE;
				break;
			}
			break;
		case AifBuManagerEvent:
			aac_handle_aif_bu(dev, aifcmd);
			break;
		}

		/*
		 * If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 * These are job progress AIFs. When a Clear is being done
		 * on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change, so we monitor for job-status completion of the
		 * clear and then wait for a container change.
		 */

		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	container = 0;
retry_next:
	if (device_config_needed == NOTHING) {
		for (; container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			    (dev->fsa_dev[container].config_needed != NOTHING) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
				device_config_needed =
					dev->fsa_dev[container].config_needed;
				dev->fsa_dev[container].config_needed = NOTHING;
				channel = CONTAINER_TO_CHANNEL(container);
				id = CONTAINER_TO_ID(container);
				lun = CONTAINER_TO_LUN(container);
				break;
			}
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 * If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 * Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	    (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			fallthrough;
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			    && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);
			break;

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}

static void aac_schedule_bus_scan(struct aac_dev *aac)
{
	if (aac->sa_firmware)
		aac_schedule_safw_scan_worker(aac);
	else
		aac_schedule_src_reinit_aif_worker(aac);
}

static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host = aac->scsi_host_ptr;
	int jafo = 0;
	int bled;
	u64 dmamask;
	int num_of_fibs = 0;

	/*
	 * Assumptions:
	 *  - host is locked, unless called by the aacraid thread.
	 *    (a matter of convenience, due to legacy issues surrounding
	 *    eh_host_adapter_reset).
	 *  - in_reset is asserted, so no new i/o is getting to the
	 *    card.
	 *  - The card is dead, or will be very shortly ;-/ so no new
	 *    commands are completing in the interrupt service.
	 */
	aac_adapter_disable_int(aac);
	if (aac->thread && aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		aac->thread = NULL;
		jafo = 1;
	}
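	/* jafo: we stopped the aif thread ourselves; restart it after re-init. */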

	/*
	 * A positive health value means the adapter is in a known
	 * DEAD PANIC state and can be reset to `try again'.
	 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
	retval = aac_adapter_restart(aac, bled, reset_type);

	if (retval)
		goto out;

	/*
	 * Loop through the fibs; force-complete any synchronous FIBs
	 * that are still waiting on a response so their owners unblock.
	 */
	retval = 1;
	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	for (index = 0; index < num_of_fibs; index++) {

		struct fib *fib = &aac->fibs[index];
		__le32 XferState = fib->hw_fib_va->header.XferState;
		bool is_response_expected = false;

		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		    (XferState & cpu_to_le32(ResponseExpected)))
			is_response_expected = true;

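		/*
		 * Completing event_wait unwinds anyone still blocked
		 * waiting on this fib.
		 */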
		if (is_response_expected
		    || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			complete(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. The only
	 * risk is a change in Firmware dropping cache; it is assumed the
	 * caller will ensure that i/o is quiesced and the card is flushed
	 * in that case.
	 */
	aac_free_irq(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	aac_adapter_ioremap(aac, 0);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;

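	/*
	 * Restore the DMA masks: 31-bit-quirk and non-SRC adapters set a
	 * 32-bit streaming mask, SRC adapters set a 32-bit coherent mask,
	 * and 31-bit-quirk parts then narrow the coherent mask to 31 bits.
	 */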
	dmamask = DMA_BIT_MASK(32);
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT)
		retval = dma_set_mask(&aac->pdev->dev, dmamask);
	else if (!(quirks & AAC_QUIRK_SRC))
		retval = dma_set_mask(&aac->pdev->dev, dmamask);
	else
		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);

	if (quirks & AAC_QUIRK_31BIT && !retval) {
		dmamask = DMA_BIT_MASK(31);
		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
	}

	if (retval)
		goto out;

	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;

	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			aac->thread = NULL;
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	scsi_host_complete_all_commands(host, DID_RESET);

	retval = 0;
out:
	aac->in_reset = 0;

	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
	if (!retval && !is_kdump_kernel()) {
		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
		aac_schedule_bus_scan(aac);
	}

	if (jafo)
		spin_lock_irq(host->host_lock);

	return retval;
}

int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	unsigned long flagv = 0;
	int retval, unblock_retval;
	struct Scsi_Host *host = aac->scsi_host_ptr;
	int bled;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	scsi_host_block(host);

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
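	/*
	 * bled: honour the caller's force level, otherwise derive it from
	 * the aac_check_reset module parameter (values other than 0 or 1).
	 */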
	spin_lock_irqsave(host->host_lock, flagv);
	bled = forced ? forced :
		(aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);

	unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
	if (!retval)
		retval = unblock_retval;
	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);

		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *)fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
					fibctx,
					sizeof(struct aac_pause),
					FsaNormal,
					-2 /* Timeout silently */, 1,
					NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/*
			 * FIB should be freed only after getting
			 * the response from the F/W
			 */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}

int aac_check_health(struct aac_dev *aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head *entry;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 * aac_aifcmd.command = AifCmdEventNotify = 1
	 * aac_aifcmd.seqnum = 0xFFFFFFFF
	 * aac_aifcmd.data[0] = AifEnExpEvent = 23
	 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 * aac_aifcmd.data[2] = AifHighPriority = 3
	 * aac_aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib *hw_fib;
		struct fib *fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd *aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			complete(&fibctx->completion);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
		       aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

out:
	aac->in_reset = 0;
	return BlinkLED;
}

static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
{
	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
}

static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
						       int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
}

static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
}

static void aac_put_safw_scsi_device(struct scsi_device *sdev)
{
	if (sdev)
		scsi_device_put(sdev);
}

static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	scsi_remove_device(sdev);
	aac_put_safw_scsi_device(sdev);
}

static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
					       int bus, int target)
{
	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
}

static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
{
	if (is_safw_raid_volume(dev, bus, target))
		return dev->fsa_dev[target].valid;
	else
		return aac_is_safw_scan_count_equal(dev, bus, target);
}

static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
{
	int is_exposed = 0;
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	if (sdev)
		is_exposed = 1;
	aac_put_safw_scsi_device(sdev);

	return is_exposed;
}

static int aac_update_safw_host_devices(struct aac_dev *dev)
{
	int i;
	int bus;
	int target;
	int is_exposed = 0;
	int rcode = 0;

	rcode = aac_setup_safw_adapter(dev);
	if (unlikely(rcode < 0))
		goto out;

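	/*
	 * Reconcile the SCSI midlayer's view with the firmware's: expose
	 * valid targets that are not yet known, and remove exposed targets
	 * that are no longer valid.
	 */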
	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {

		bus = get_bus_number(i);
		target = get_target_number(i);

		is_exposed = aac_is_safw_device_exposed(dev, bus, target);

		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
			aac_add_safw_device(dev, bus, target);
		else if (!aac_is_safw_target_valid(dev, bus, target) &&
			 is_exposed)
			aac_remove_safw_device(dev, bus, target);
	}
out:
	return rcode;
}

static int aac_scan_safw_host(struct aac_dev *dev)
{
	int rcode = 0;

	rcode = aac_update_safw_host_devices(dev);
	if (rcode)
		aac_schedule_safw_scan_worker(dev);

	return rcode;
}

int aac_scan_host(struct aac_dev *dev)
{
	int rcode = 0;

	mutex_lock(&dev->scan_mutex);
	if (dev->sa_firmware)
		rcode = aac_scan_safw_host(dev);
	else
		scsi_scan_host(dev->scsi_host_ptr);
	mutex_unlock(&dev->scan_mutex);

	return rcode;
}

void aac_src_reinit_aif_worker(struct work_struct *work)
{
	struct aac_dev *dev = container_of(to_delayed_work(work),
			struct aac_dev, src_reinit_aif_worker);

	wait_event(dev->scsi_host_ptr->host_wait,
		   !scsi_host_in_recovery(dev->scsi_host_ptr));
	aac_reinit_aif(dev, dev->cardtype);
}

/**
 * aac_handle_sa_aif - Handle a message from the firmware
 * @dev: Which adapter this fib is from
 * @fibptr: Pointer to fibptr from adapter
 *
 * This routine handles a driver notify fib from the adapter and
 * dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i;
	u32 events = 0;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

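	/* Only the highest-priority pending event bit is handled per AIF. */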
	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		aac_scan_host(dev);

		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

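	/* Poll up to 10 seconds for the firmware to clear the AIF bit. */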
	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn("AIF not cleared by firmware - %d/%d\n",
				i, 10);
			ssleep(1);
		}
	}
}

static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
	    / sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}

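/*
 * Allocate matched hw_fib/fib pairs; on the first allocation failure the
 * unmatched buffer is freed and the count of complete pairs is returned.
 */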
static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
			struct fib **fib_pool,
			unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/*
	 * Get the actual number of allocated fibs
	 */
	num = hw_fib_p - hw_fib_pool;
	return num;
}

static void wakeup_fibctx_threads(struct aac_dev *dev,
				  struct hw_fib **hw_fib_pool,
				  struct fib **fib_pool,
				  struct fib *fib,
				  struct hw_fib *hw_fib,
				  unsigned int num)
{
	unsigned long flagv;
	struct list_head *entry;
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	u32 time_now, time_last;
	struct hw_fib *hw_newfib;
	struct fib *newfib;
	struct aac_fib_context *fibctx;

	time_now = jiffies/HZ;
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (entry != &dev->fib_list) {
		/*
		 * Extract the fibctx
		 */
		fibctx = list_entry(entry, struct aac_fib_context,
				next);
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ so do not
			 * panic ...
			 */
			time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(dev, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		if (hw_fib_p >= &hw_fib_pool[num]) {
			pr_warn("aifd: didn't allocate NewFib\n");
			entry = entry->next;
			continue;
		}

		hw_newfib = *hw_fib_p;
		*(hw_fib_p++) = NULL;
		newfib = *fib_p;
		*(fib_p++) = NULL;
		/*
		 * Make the copy of the FIB
		 */
		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
		memcpy(newfib, fib, sizeof(struct fib));
		newfib->hw_fib_va = hw_newfib;
		/*
		 * Put the FIB onto the
		 * fibctx's fibs
		 */
		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
		fibctx->count++;
		/*
		 * Set the event to wake up the
		 * thread that is waiting.
		 */
		complete(&fibctx->completion);

		entry = entry->next;
	}
	/*
	 * Set the status of this FIB
	 */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
	aac_fib_adapter_complete(fib, sizeof(u32));
	spin_unlock_irqrestore(&dev->fib_lock, flagv);
}

static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *)hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		    || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
					    GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
				      fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}

static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
				     u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
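	/* Split the 64-bit DMA address into high and low 32-bit halves. */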
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
			   FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless we
	 * receive a response from the F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
2354*4882a593Smuzhiyun
static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
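	/*
	 * struct tm reports months 0-11 and years relative to 1900;
	 * convert to calendar values before BCD-encoding them below.
	 */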
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
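
/*
 * Worked example (illustrative only): for a local time of
 * 2024-06-01 13:45:30, bin2bcd() yields
 *
 *	wellness_str[8]  = 0x13	(hour)
 *	wellness_str[9]  = 0x45	(minute)
 *	wellness_str[10] = 0x30	(second)
 *	wellness_str[12] = 0x06	(month)
 *	wellness_str[13] = 0x01	(day)
 *	wellness_str[14] = 0x20	(century)
 *	wellness_str[15] = 0x24	(year within century)
 *
 * i.e. each pair of decimal digits is packed into one byte of the
 * "<HW>TD..." template before it is sent as the wellness payload.
 */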

static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	int ret = -ENOMEM;
	struct fib *fibptr;
	__le32 *info;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
			   1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * we receive a response from the firmware.
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * The FIB should be freed only after
	 * getting a response from the firmware.
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
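
/*
 * Note: legacy firmware takes the raw 32-bit epoch seconds via the
 * SendHostTime FIB above, while SA firmware expects the BCD-encoded
 * wellness string built by aac_send_safw_hostttime(); the periodic
 * thread below picks the path based on dev->sa_firmware.
 */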

/**
 * aac_command_thread - command processing thread
 * @data: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIFs.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIFs to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies, next_jiffies))
		    && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies, next_jiffies)
		    && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to the adapter if it's sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			ktime_get_real_ts64(&now);

			/*
			 * Synchronize our watches: if we are more than a
			 * tick away from a whole-second boundary, sleep so
			 * that the next pass lands near one; otherwise round
			 * to the nearest second and send the time now.
			 */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			    && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret = aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies, next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
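
/*
 * Usage sketch (illustrative, not verbatim driver code): the monitor
 * thread is typically created during adapter bring-up with kthread_run()
 * and torn down with kthread_stop(); the kthread_should_stop() checks in
 * the loop above make that a clean shutdown path.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_run(aac_command_thread, dev, "%s", dev->name);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	...
 *	kthread_stop(t);
 */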

int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

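	/*
	 * With MSI-X enabled and more than one vector, register one handler
	 * per vector; otherwise fall back to a single, possibly shared,
	 * INTx/MSI interrupt on dev->pdev->irq.
	 */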
	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
				       dev->name, dev->id, i);
				for (j = 0; j < i; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				/* MSI-X was just disabled; stop instead of
				 * requesting the remaining vectors. */
				return -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			       dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}

void aac_free_irq(struct aac_dev *dev)
{
	int i;

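	/*
	 * Mirror aac_acquire_irq(): SRC-family adapters may have registered
	 * one handler per MSI-X vector, everything else a single IRQ.
	 * Disable MSI/MSI-X only after all handlers have been freed.
	 */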
	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}