xref: /OK3568_Linux_fs/kernel/drivers/scsi/device_handler/scsi_dh_rdac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code; /* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4]; /* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8	index;
	u8	array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
	struct list_head	dh_list;
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct list_head	node;
	struct rdac_controller	*ctlr;
	struct scsi_device	*sdev;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};
static const char *lun_state[] =
{
	"unowned",
	"owned",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging.
 * 2 bits for each type of logging, only two types defined for now.
 * Can be enhanced if required at a later point.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)

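/*
 * Build the RDAC mode page (legacy mode sense 6 layout or the expanded
 * mode sense 10 layout, depending on ctlr->use_ms10) that transfers
 * ownership of every LUN queued on @list to this controller, and fill
 * in the matching MODE SELECT CDB.  Returns the mode page data size.
 */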
static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
				      struct list_head *list,
				      unsigned char *cdb)
{
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* Prepare the command. */
	if (ctlr->use_ms10) {
		cdb[0] = MODE_SELECT_10;
		cdb[7] = data_size >> 8;
		cdb[8] = data_size & 0xff;
	} else {
		cdb[0] = MODE_SELECT;
		cdb[4] = data_size;
	}

	return data_size;
}

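/*
 * kref release callback: unlink the controller from ctlr_list and free
 * it.  Both callers drop their reference while holding list_lock.
 */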
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	list_del(&ctlr->node);
	kfree(ctlr);
}

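/*
 * Look up the controller matching (array_id, index, host) on ctlr_list
 * and take a reference, or allocate and initialize a new one.  Called
 * with list_lock held, hence the GFP_ATOMIC allocation.
 */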
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		return NULL;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
	INIT_LIST_HEAD(&ctlr->dh_list);

	return ctlr;
}

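/*
 * Read inquiry VPD page 0xC8 ("edid") to get the LUN number, the array
 * name and the array unique id for this device.
 */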
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	int err = SCSI_DH_IO, i;
	struct c8_inquiry *inqp = &h->inq.c8;

	if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
			       sizeof(struct c8_inquiry))) {
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
		memset(array_id, 0, UNIQUE_ID_LEN);
		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
		err = SCSI_DH_OK;
	}
	return err;
}

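/*
 * Read inquiry VPD page 0xC9 ("vace") to detect the operating mode
 * (RDAC, AVT or IOSHIP), the LUN ownership and the path priority, and
 * propagate the resulting access state to every device attached to
 * this controller.
 */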
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err = SCSI_DH_IO, access_state;
	struct rdac_dh_data *tmp;
	struct c9_inquiry *inqp = &h->inq.c9;

	h->state = RDAC_STATE_ACTIVE;
	if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
			       sizeof(struct c9_inquiry))) {
		/* detect the operating mode */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership */
		if (inqp->avte_cvp & 0x1) {
			h->lun_state = RDAC_LUN_OWNED;
			access_state = SCSI_ACCESS_STATE_OPTIMAL;
		} else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE) {
				h->state = RDAC_STATE_PASSIVE;
				access_state = SCSI_ACCESS_STATE_STANDBY;
			} else
				access_state = SCSI_ACCESS_STATE_ACTIVE;
		}

		/* Update path prio */
		if (inqp->path_prio & 0x1) {
			h->preferred = RDAC_PREFERRED;
			access_state |= SCSI_ACCESS_STATE_PREFERRED;
		} else
			h->preferred = RDAC_NON_PREFERRED;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
			/* h->sdev should always be valid */
			BUG_ON(!tmp->sdev);
			tmp->sdev->access_state = access_state;
		}
		rcu_read_unlock();
		err = SCSI_DH_OK;
	}

	return err;
}

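/*
 * Read inquiry VPD page 0xC4 ("subs") to find the controller slot, then
 * look up or create the matching rdac_controller and link this device
 * into its dh_list.
 */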
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	int err = SCSI_DH_IO, index;
	struct c4_inquiry *inqp = &h->inq.c4;

	if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp,
			       sizeof(struct c4_inquiry))) {
		/* get the controller index */
		if (inqp->slot_id[1] == 0x31)
			index = 0;
		else
			index = 1;

		spin_lock(&list_lock);
		h->ctlr = get_controller(index, array_name, array_id, sdev);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
		else {
			h->sdev = sdev;
			list_add_rcu(&h->node, &h->ctlr->dh_list);
			/* only report success if a controller was set up */
			err = SCSI_DH_OK;
		}
		spin_unlock(&list_lock);
	}
	return err;
}

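/*
 * Read inquiry VPD page 0xC2 ("swr4") and decide whether the expanded
 * MODE SELECT(10) format is needed to describe all supported LUNs.
 */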
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err = SCSI_DH_IO;
	struct c2_inquiry *inqp = &h->inq.c2;

	if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp,
			       sizeof(struct c2_inquiry))) {
		/*
		 * If MODE6_MAX_LUN or more LUNs are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
		err = SCSI_DH_OK;
	}
	return err;
}

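/*
 * Map the sense data returned by a failed MODE SELECT to a SCSI_DH_*
 * result so that send_mode_select() knows whether to retry.
 */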
static int mode_select_handle_sense(struct scsi_device *sdev,
				    struct scsi_sense_hdr *sense_hdr)
{
	int err = SCSI_DH_IO;
	struct rdac_dh_data *h = sdev->handler_data;

	if (!scsi_sense_valid(sense_hdr))
		goto done;

	switch (sense_hdr->sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_IMM_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

done:
	return err;
}

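/*
 * Workqueue handler: drain the controller's list of queued activation
 * requests, issue a single MODE SELECT that fails all of those LUNs
 * over to this controller (retrying on transient errors), then run the
 * completion callbacks.
 */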
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);
	unsigned char cdb[MAX_COMMAND_SIZE];
	struct scsi_sense_hdr sshdr;
	unsigned int data_size;
	u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		REQ_FAILFAST_DRIVER;

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

 retry:
	memset(cdb, 0, sizeof(cdb));

	data_size = rdac_failover_get(ctlr, &list, cdb);

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
			data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ,
			RDAC_RETRIES, req_flags, 0, NULL)) {
		err = mode_select_handle_sense(sdev, &sshdr);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
		if (err == SCSI_DH_IMM_RETRY)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}

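/*
 * Queue an activation request on the owning controller and schedule the
 * kmpath_rdacd worker if a MODE SELECT is not already pending.
 */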
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = sdev->handler_data;
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

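/*
 * Device handler ->activate: refresh the ownership state and, when this
 * path has to take ownership for the current mode, queue a MODE SELECT;
 * otherwise complete the activation immediately.
 */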
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

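/*
 * Device handler ->prep_fn: quietly fail I/O that is about to be sent
 * down a passive path.
 */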
static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->state != RDAC_STATE_ACTIVE) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

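/*
 * Device handler ->check_sense: examine the sense data of a failed
 * command and decide whether to retry on this path, fail the path, or
 * let the SCSI midlayer handle the error.
 */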
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready.
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible.
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved.
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN.
			 * Fail the path, so that the other path can be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

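/*
 * Device handler ->attach: gather the LUN and array information, bind
 * the device to its rdac_controller, and detect both the ownership
 * state and the MODE SELECT format to use.
 */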
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return SCSI_DH_NOMEM;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	sdev->handler_data = h;
	return SCSI_DH_OK;

clean_ctlr:
	spin_lock(&list_lock);
	kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);

failed:
	kfree(h);
	return err;
}

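/*
 * Device handler ->detach: flush any pending MODE SELECT work, drop the
 * controller reference and free the per-device handler data.
 */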
static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->ctlr && h->ctlr->ms_queued)
		flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	if (h->ctlr) {
		list_del_rcu(&h->node);
		kref_put(&h->ctlr->kref, release_controller);
	}
	spin_unlock(&list_lock);
	sdev->handler_data = NULL;
	synchronize_rcu();
	kfree(h);
}

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.\n");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");

		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");