xref: /OK3568_Linux_fs/kernel/drivers/scsi/aacraid/src.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  src.c
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;

	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

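		/*
		 * Drain this vector's slice of the host RRQ ring
		 * ([vector_no * vector_cap, (vector_no + 1) * vector_cap)).
		 * Each entry carries the FIB handle in its low bits, a fast
		 * response flag in bit 30 and a toggle bit in bit 31; a zero
		 * handle means there is nothing more to process.
		 */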
		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			handle >>= 2;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}

/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status
 *	@r1: first return value
 *	@r2: second return value
 *	@r3: third return value
 *	@r4: fourth return value
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
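	/*
	 * Note: although this interface accepts p5 and p6, only Mailboxes
	 * 1 - 4 are written here; p5 and p6 are unused by this implementation.
	 */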

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
		!dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	 /* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}

}

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		goto err_blink;

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		goto err_out;

	/*
	 *	Check to see if the monitor panic'd.
	 */
	if (unlikely(status & MONITOR_PANIC))
		goto err_out;

	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;

err_out:
	return -1;

err_blink:
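	/* Return the firmware BlinkLED (panic) code carried in OMR bits 23:16 */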
	return (status >> 16) & 0xFF;
}

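/* Round-robin across the enabled MSI-X vectors when picking a reply queue */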
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
}

/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;


	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
					HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
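			/* For legacy FIBs the reply vector rides in the upper 16 bits of the handle */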
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		address = fib->hw_fib_pa;
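		/*
		 * The low bits of the inbound queue address encode the command
		 * size in 128-byte units (minus one), capped at 31.
		 */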
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the fibsize bits (size in 128-byte units, minus one) */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the fibsize bits, including the transport header */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}

/**
 *	aac_src_ioremap
 *	@dev: device ioremap
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 *  aac_srcv_ioremap
 *	@dev: device ioremap
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
	ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

void aac_set_intx_mode(struct aac_dev *dev)
{
	if (dev->msi_enabled) {
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
		dev->msi_enabled = 0;
		msleep(5000); /* Delay 5 seconds */
	}
}

static void aac_clear_omr(struct aac_dev *dev)
{
	u32 omr_value = 0;

	omr_value = src_readl(dev, MUnit.OMR);

	/*
	 * Check for PCI Errors or Kernel Panic
	 */
	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
		omr_value = 0;

	/*
	 * Preserve MSIX Value if any
	 */
	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
	src_readl(dev, MUnit.OMR);
}

static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	if (!aac_fib_dump)
		return;

	supported_options3  = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0,  0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		if (status == 0xffffffff)
			status = 0;

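		/* Firmware still booting: restart the timeout window */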
		if (status & KERNEL_BOOTING) {
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}

static void aac_src_drop_io(struct aac_dev *dev)
{
	if (!dev->soft_reset_support)
		return;

	aac_adapter_sync_cmd(dev, DROP_IO,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
						NULL, NULL, NULL, NULL);
	aac_src_drop_io(dev);
}

static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}

static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u_int32_t val;

	aac_clear_omr(dev);
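	/* Set bit 0 of the register at IBW_SWR_OFFSET to trigger the controller's soft reset */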
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has no effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Wait (with a delay) until the controller reports up and running
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}

/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *  aac_src_init	-	initialize a Cardinal Frey Bar card
 *  @dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0]))  < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}

/**
 *  aac_src_soft_reset	-	perform soft reset to speed up
 *  access
 *
 *  Assumptions: That the controller is in a state where we can
 *  bring it back to life with an init struct. We can only use
 *  fast sync commands, as the timeout is 5 seconds.
 *
 *  @dev: device to configure
 *
 */

static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;       // pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;       // not up and running

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		 status+0, status+1, status+2, status+3, status+4);

	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}
/**
 *  aac_srcv_init	-	initialize an SRCv card
 *  @dev: device to configure
 *
 */

aac_srcv_init(struct aac_dev * dev)1175*4882a593Smuzhiyun int aac_srcv_init(struct aac_dev *dev)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	unsigned long start;
1178*4882a593Smuzhiyun 	unsigned long status;
1179*4882a593Smuzhiyun 	int restart = 0;
1180*4882a593Smuzhiyun 	int instance = dev->id;
1181*4882a593Smuzhiyun 	const char *name = dev->name;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
1184*4882a593Smuzhiyun 	dev->a_ops.adapter_comm = aac_src_select_comm;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
1187*4882a593Smuzhiyun 	if (aac_adapter_ioremap(dev, dev->base_size)) {
1188*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
1189*4882a593Smuzhiyun 		goto error_iounmap;
1190*4882a593Smuzhiyun 	}
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	/* Failure to reset here is an option ... */
1193*4882a593Smuzhiyun 	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1194*4882a593Smuzhiyun 	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	if (dev->init_reset) {
1197*4882a593Smuzhiyun 		dev->init_reset = false;
1198*4882a593Smuzhiyun 		if (aac_src_soft_reset(dev)) {
1199*4882a593Smuzhiyun 			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
1200*4882a593Smuzhiyun 			++restart;
1201*4882a593Smuzhiyun 		}
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	/*
1205*4882a593Smuzhiyun 	 *	Check to see if flash update is running.
1206*4882a593Smuzhiyun 	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
1207*4882a593Smuzhiyun 	 */
1208*4882a593Smuzhiyun 	status = src_readl(dev, MUnit.OMR);
1209*4882a593Smuzhiyun 	if (status & FLASH_UPD_PENDING) {
1210*4882a593Smuzhiyun 		start = jiffies;
1211*4882a593Smuzhiyun 		do {
1212*4882a593Smuzhiyun 			status = src_readl(dev, MUnit.OMR);
1213*4882a593Smuzhiyun 			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
1214*4882a593Smuzhiyun 				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
1215*4882a593Smuzhiyun 					dev->name, instance);
1216*4882a593Smuzhiyun 				goto error_iounmap;
1217*4882a593Smuzhiyun 			}
1218*4882a593Smuzhiyun 		} while (!(status & FLASH_UPD_SUCCESS) &&
1219*4882a593Smuzhiyun 			 !(status & FLASH_UPD_FAILED));
1220*4882a593Smuzhiyun 		/* Delay 10 seconds: the firmware is performing a soft reset
1221*4882a593Smuzhiyun 		 * right now, so do not read the scratch pad register during
1222*4882a593Smuzhiyun 		 * this window.
1223*4882a593Smuzhiyun 		 */
1224*4882a593Smuzhiyun 		ssleep(10);
1225*4882a593Smuzhiyun 	}
1226*4882a593Smuzhiyun 	/*
1227*4882a593Smuzhiyun 	 *	Check to see if the board panic'd while booting.
1228*4882a593Smuzhiyun 	 */
1229*4882a593Smuzhiyun 	status = src_readl(dev, MUnit.OMR);
1230*4882a593Smuzhiyun 	if (status & KERNEL_PANIC) {
1231*4882a593Smuzhiyun 		if (aac_src_restart_adapter(dev,
1232*4882a593Smuzhiyun 			aac_src_check_health(dev), IOP_HWSOFT_RESET))
1233*4882a593Smuzhiyun 			goto error_iounmap;
1234*4882a593Smuzhiyun 		++restart;
1235*4882a593Smuzhiyun 	}
1236*4882a593Smuzhiyun 	/*
1237*4882a593Smuzhiyun 	 *	Check to see if the board failed any self tests.
1238*4882a593Smuzhiyun 	 */
1239*4882a593Smuzhiyun 	status = src_readl(dev, MUnit.OMR);
1240*4882a593Smuzhiyun 	if (status & SELF_TEST_FAILED) {
1241*4882a593Smuzhiyun 		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
1242*4882a593Smuzhiyun 		goto error_iounmap;
1243*4882a593Smuzhiyun 	}
1244*4882a593Smuzhiyun 	/*
1245*4882a593Smuzhiyun 	 *	Check to see if the monitor panic'd while booting.
1246*4882a593Smuzhiyun 	 */
1247*4882a593Smuzhiyun 	if (status & MONITOR_PANIC) {
1248*4882a593Smuzhiyun 		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
1249*4882a593Smuzhiyun 		goto error_iounmap;
1250*4882a593Smuzhiyun 	}
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	start = jiffies;
1253*4882a593Smuzhiyun 	/*
1254*4882a593Smuzhiyun 	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
1255*4882a593Smuzhiyun 	 */
1256*4882a593Smuzhiyun 	do {
1257*4882a593Smuzhiyun 		status = src_readl(dev, MUnit.OMR);
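		/*
		 * An all-ones read (INVALID_OMR) most likely means the
		 * adapter is still coming out of reset; treat it as
		 * "not ready yet" rather than as a real status value.
		 */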
1258*4882a593Smuzhiyun 		if (status == INVALID_OMR)
1259*4882a593Smuzhiyun 			status = 0;
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 		if ((restart &&
1262*4882a593Smuzhiyun 		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
1263*4882a593Smuzhiyun 		  time_after(jiffies, start+HZ*startup_timeout)) {
1264*4882a593Smuzhiyun 			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
1265*4882a593Smuzhiyun 					dev->name, instance, status);
1266*4882a593Smuzhiyun 			goto error_iounmap;
1267*4882a593Smuzhiyun 		}
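		/*
		 * No restart attempted yet: if the adapter reports a failure,
		 * or most of the startup window has elapsed, try one
		 * IOP_HWSOFT_RESET restart before the timeout above fires.
		 */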
1268*4882a593Smuzhiyun 		if (!restart &&
1269*4882a593Smuzhiyun 		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
1270*4882a593Smuzhiyun 		  time_after(jiffies, start + HZ *
1271*4882a593Smuzhiyun 		  ((startup_timeout > 60)
1272*4882a593Smuzhiyun 		    ? (startup_timeout - 60)
1273*4882a593Smuzhiyun 		    : (startup_timeout / 2))))) {
1274*4882a593Smuzhiyun 			if (likely(!aac_src_restart_adapter(dev,
1275*4882a593Smuzhiyun 				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
1276*4882a593Smuzhiyun 				start = jiffies;
1277*4882a593Smuzhiyun 			++restart;
1278*4882a593Smuzhiyun 		}
1279*4882a593Smuzhiyun 		msleep(1);
1280*4882a593Smuzhiyun 	} while (!(status & KERNEL_UP_AND_RUNNING));
1281*4882a593Smuzhiyun 
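	/*
	 * If the adapter had to be restarted, force aac_commit to 1 so the
	 * (presumably rebuilt) container configuration is committed;
	 * aac_commit is assumed here to be the driver-wide "commit config"
	 * module parameter.
	 */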
1282*4882a593Smuzhiyun 	if (restart && aac_commit)
1283*4882a593Smuzhiyun 		aac_commit = 1;
1284*4882a593Smuzhiyun 	/*
1285*4882a593Smuzhiyun 	 *	Fill in the common function dispatch table.
1286*4882a593Smuzhiyun 	 */
1287*4882a593Smuzhiyun 	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
1288*4882a593Smuzhiyun 	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
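	/*
	 * adapter_enable_int still points at the disable routine at this
	 * stage; it is switched to aac_src_enable_interrupt_message further
	 * down, once the IRQ has been acquired.
	 */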
1289*4882a593Smuzhiyun 	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1290*4882a593Smuzhiyun 	dev->a_ops.adapter_notify = aac_src_notify_adapter;
1291*4882a593Smuzhiyun 	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1292*4882a593Smuzhiyun 	dev->a_ops.adapter_check_health = aac_src_check_health;
1293*4882a593Smuzhiyun 	dev->a_ops.adapter_restart = aac_src_restart_adapter;
1294*4882a593Smuzhiyun 	dev->a_ops.adapter_start = aac_src_start_adapter;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	/*
1297*4882a593Smuzhiyun 	 *	First clear out all interrupts.  Then enable the ones that we
1298*4882a593Smuzhiyun 	 *	can handle.
1299*4882a593Smuzhiyun 	 */
1300*4882a593Smuzhiyun 	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
1301*4882a593Smuzhiyun 	aac_adapter_disable_int(dev);
1302*4882a593Smuzhiyun 	src_writel(dev, MUnit.ODR_C, 0xffffffff);
1303*4882a593Smuzhiyun 	aac_adapter_enable_int(dev);
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	if (aac_init_adapter(dev) == NULL)
1306*4882a593Smuzhiyun 		goto error_iounmap;
1307*4882a593Smuzhiyun 	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
1308*4882a593Smuzhiyun 		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
1309*4882a593Smuzhiyun 		goto error_iounmap;
1310*4882a593Smuzhiyun 	if (dev->msi_enabled)
1311*4882a593Smuzhiyun 		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (aac_acquire_irq(dev))
1314*4882a593Smuzhiyun 		goto error_iounmap;
1315*4882a593Smuzhiyun 
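	/*
	 * Record the BAR1 mapping (PCI resource 2 on SRCv) as the debug
	 * register window.
	 */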
1316*4882a593Smuzhiyun 	dev->dbg_base = pci_resource_start(dev->pdev, 2);
1317*4882a593Smuzhiyun 	dev->dbg_base_mapped = dev->regs.src.bar1;
1318*4882a593Smuzhiyun 	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
1319*4882a593Smuzhiyun 	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	aac_adapter_enable_int(dev);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	if (!dev->sync_mode) {
1324*4882a593Smuzhiyun 		/*
1325*4882a593Smuzhiyun 		 * Tell the adapter that all is configured, and it can
1326*4882a593Smuzhiyun 		 * start accepting requests
1327*4882a593Smuzhiyun 		 */
1328*4882a593Smuzhiyun 		aac_src_start_adapter(dev);
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun 	return 0;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun error_iounmap:
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	return -1;
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun 
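/*
 * aac_src_access_devreg - manipulate the SRC doorbell and interrupt
 * mask registers for the requested mode.  Writes to MUnit.IDR signal
 * mode changes and acknowledgements to the firmware, while the
 * MUnit.OIMR/MUnit.IOAR writes mask or unmask outbound interrupt
 * sources.
 */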
1337*4882a593Smuzhiyun void aac_src_access_devreg(struct aac_dev *dev, int mode)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	u_int32_t val;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	switch (mode) {
1342*4882a593Smuzhiyun 	case AAC_ENABLE_INTERRUPT:
1343*4882a593Smuzhiyun 		src_writel(dev,
1344*4882a593Smuzhiyun 			   MUnit.OIMR,
1345*4882a593Smuzhiyun 			   dev->OIMR = (dev->msi_enabled ?
1346*4882a593Smuzhiyun 					AAC_INT_ENABLE_TYPE1_MSIX :
1347*4882a593Smuzhiyun 					AAC_INT_ENABLE_TYPE1_INTX));
1348*4882a593Smuzhiyun 		break;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	case AAC_DISABLE_INTERRUPT:
1351*4882a593Smuzhiyun 		src_writel(dev,
1352*4882a593Smuzhiyun 			   MUnit.OIMR,
1353*4882a593Smuzhiyun 			   dev->OIMR = AAC_INT_DISABLE_ALL);
1354*4882a593Smuzhiyun 		break;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	case AAC_ENABLE_MSIX:
1357*4882a593Smuzhiyun 		/* set bit 6 */
1358*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.IDR);
1359*4882a593Smuzhiyun 		val |= 0x40;
1360*4882a593Smuzhiyun 		src_writel(dev,  MUnit.IDR, val);
1361*4882a593Smuzhiyun 		src_readl(dev, MUnit.IDR);
1362*4882a593Smuzhiyun 		/* unmask int. */
1363*4882a593Smuzhiyun 		val = PMC_ALL_INTERRUPT_BITS;
1364*4882a593Smuzhiyun 		src_writel(dev, MUnit.IOAR, val);
1365*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.OIMR);
1366*4882a593Smuzhiyun 		src_writel(dev,
1367*4882a593Smuzhiyun 			   MUnit.OIMR,
1368*4882a593Smuzhiyun 			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
1369*4882a593Smuzhiyun 		break;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	case AAC_DISABLE_MSIX:
1372*4882a593Smuzhiyun 		/* reset bit 6 */
1373*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.IDR);
1374*4882a593Smuzhiyun 		val &= ~0x40;
1375*4882a593Smuzhiyun 		src_writel(dev, MUnit.IDR, val);
1376*4882a593Smuzhiyun 		src_readl(dev, MUnit.IDR);
1377*4882a593Smuzhiyun 		break;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	case AAC_CLEAR_AIF_BIT:
1380*4882a593Smuzhiyun 		/* set bit 5 */
1381*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.IDR);
1382*4882a593Smuzhiyun 		val |= 0x20;
1383*4882a593Smuzhiyun 		src_writel(dev, MUnit.IDR, val);
1384*4882a593Smuzhiyun 		src_readl(dev, MUnit.IDR);
1385*4882a593Smuzhiyun 		break;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	case AAC_CLEAR_SYNC_BIT:
1388*4882a593Smuzhiyun 		/* set bit 4 */
1389*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.IDR);
1390*4882a593Smuzhiyun 		val |= 0x10;
1391*4882a593Smuzhiyun 		src_writel(dev, MUnit.IDR, val);
1392*4882a593Smuzhiyun 		src_readl(dev, MUnit.IDR);
1393*4882a593Smuzhiyun 		break;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	case AAC_ENABLE_INTX:
1396*4882a593Smuzhiyun 		/* set bit 7 */
1397*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.IDR);
1398*4882a593Smuzhiyun 		val |= 0x80;
1399*4882a593Smuzhiyun 		src_writel(dev, MUnit.IDR, val);
1400*4882a593Smuzhiyun 		src_readl(dev, MUnit.IDR);
1401*4882a593Smuzhiyun 		/* unmask int. */
1402*4882a593Smuzhiyun 		val = PMC_ALL_INTERRUPT_BITS;
1403*4882a593Smuzhiyun 		src_writel(dev, MUnit.IOAR, val);
1404*4882a593Smuzhiyun 		src_readl(dev, MUnit.IOAR);
1405*4882a593Smuzhiyun 		val = src_readl(dev, MUnit.OIMR);
1406*4882a593Smuzhiyun 		src_writel(dev, MUnit.OIMR,
1407*4882a593Smuzhiyun 				val & (~(PMC_GLOBAL_INT_BIT2)));
1408*4882a593Smuzhiyun 		break;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	default:
1411*4882a593Smuzhiyun 		break;
1412*4882a593Smuzhiyun 	}
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun static int aac_src_get_sync_status(struct aac_dev *dev)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun 	int msix_val = 0;
1418*4882a593Smuzhiyun 	int legacy_val = 0;
1419*4882a593Smuzhiyun 
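	/* A set bit in the MSI doorbell status indicates the sync command
	 * completed while the controller is in MSI-x mode.
	 */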
1420*4882a593Smuzhiyun 	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	if (!dev->msi_enabled) {
1423*4882a593Smuzhiyun 		/*
1424*4882a593Smuzhiyun 		 * If the legacy interrupt status indicates the command is not
1425*4882a593Smuzhiyun 		 * complete, sample the MSI-x register to see if it indicates
1426*4882a593Smuzhiyun 		 * completion; if so, switch the controller to MSI-x mode and
1427*4882a593Smuzhiyun 		 * consider the command completed.
1428*4882a593Smuzhiyun 		 */
1429*4882a593Smuzhiyun 		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
1430*4882a593Smuzhiyun 		if (!(legacy_val & 1) && msix_val)
1431*4882a593Smuzhiyun 			dev->msi_enabled = 1;
1432*4882a593Smuzhiyun 		return legacy_val;
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	return msix_val;
1436*4882a593Smuzhiyun }
1437