xref: /OK3568_Linux_fs/kernel/drivers/scsi/qla4xxx/ql4_bsg.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"

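/**
 * qla4xxx_read_flash - handle QLISCSI_VND_READ_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of flash, starting at the offset
 * passed in vendor_cmd[1], into a coherent DMA buffer and copies the data
 * into the caller's reply payload.
 **/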
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

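/**
 * qla4xxx_update_flash - handle QLISCSI_VND_UPDATE_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a coherent DMA buffer and writes it to
 * flash at the offset passed in vendor_cmd[1], using the write options
 * passed in vendor_cmd[2].
 **/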
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}

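/**
 * qla4xxx_get_acb_state - handle QLISCSI_VND_GET_ACB_STATE vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Queries the IP state for the ACB index and IP index passed in
 * vendor_cmd[1] and vendor_cmd[2] and returns the mailbox status
 * registers in the reply payload.
 **/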
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

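/**
 * qla4xxx_read_nvram - handle QLISCSI_VND_READ_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of NVRAM, starting at the offset
 * passed in vendor_cmd[1], after validating that the range fits within
 * the adapter's NVRAM size.
 **/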
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

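/**
 * qla4xxx_update_nvram - handle QLISCSI_VND_UPDATE_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Writes the request payload to NVRAM at the offset passed in
 * vendor_cmd[1], after validating that the range fits within the
 * adapter's NVRAM size.
 **/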
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

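/**
 * qla4xxx_restore_defaults - handle QLISCSI_VND_RESTORE_DEFAULTS vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Restores factory defaults for the region and field values passed in
 * vendor_cmd[1..3].
 **/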
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}

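/**
 * qla4xxx_bsg_get_acb - handle QLISCSI_VND_GET_ACB vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Retrieves the address control block of the type passed in vendor_cmd[1]
 * and returns it in the reply payload.
 **/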
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}

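/**
 * ql4xxx_execute_diag_cmd - pass a diagnostic mailbox command to firmware
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues the mailbox command supplied in vendor_cmd[1..] and copies the
 * resulting mailbox status registers into the bsg reply for the
 * application.
 **/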
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

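/**
 * qla4_83xx_wait_for_loopback_config_comp - wait for loopback IDC events
 * @ha: pointer to host adapter structure
 * @wait_for_link: also wait for a LINK UP notification when non-zero
 *
 * Waits for the IDC Complete notification, extending the wait by
 * ha->idc_extend_tmo seconds when set, and optionally for the subsequent
 * LINK UP notification.
 **/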
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
						(ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}

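/**
 * qla4_83xx_pre_loopback_config - enable loopback before a diag test
 * @ha: pointer to host adapter structure
 * @mbox_cmd: diagnostic mailbox command to be issued
 *
 * Reads the current port configuration, enables internal or external
 * loopback as requested by mbox_cmd[1], disables DCBX and programs the
 * new configuration.
 **/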
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}
exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

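/**
 * qla4_83xx_post_loopback_config - restore port config after a diag test
 * @ha: pointer to host adapter structure
 * @mbox_cmd: diagnostic mailbox command that was issued
 *
 * Clears the loopback mode enabled by qla4_83xx_pre_loopback_config(),
 * re-enables DCBX and schedules an adapter reset if the configuration
 * cannot be restored.
 **/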
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}

exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}

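/**
 * qla4xxx_execute_diag_loopback_cmd - run a loopback diagnostic test
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues the loopback mailbox command supplied in vendor_cmd[1..]. When
 * is_qla8032() or is_qla8042() is true, the port is switched into
 * loopback mode first and restored to its default configuration
 * afterwards.
 **/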
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
				&mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/*
		 * For pre_loopback_config(), wait for LINK UP only
		 * if PHY LINK is UP.
		 */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}
exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}

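/**
 * qla4xxx_execute_diag_test - handle QLISCSI_VND_DIAG_TEST vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Dispatches the diagnostic command in vendor_cmd[1]: memory, flash,
 * NVRAM and DMA sub-tests and LED config commands go through the mailbox
 * pass-through path, while loopback sub-tests go through the loopback
 * path.
 **/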
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
		case QL_DIAG_CMD_TEST_DDR_SIZE:
		case QL_DIAG_CMD_TEST_DDR_RW:
		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
		case QL_DIAG_CMD_TEST_NVRAM:
		case QL_DIAG_CMD_TEST_FLASH_ROM:
		case QL_DIAG_CMD_TEST_DMA_XFER:
		case QL_DIAG_CMD_SELF_DDR_RW:
		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
			/* Execute diag test for adapter RAM/FLASH */
			ql4xxx_execute_diag_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to the application */
			rval = QLA_SUCCESS;
			break;

		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
			/* Execute diag test for Network */
			qla4xxx_execute_diag_loopback_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to the application */
			rval = QLA_SUCCESS;
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
				   __func__,
				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
		}
	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
	}

	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}

	return -ENOSYS;
}