xref: /OK3568_Linux_fs/kernel/drivers/scsi/lpfc/lpfc_bsg.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*******************************************************************
2*4882a593Smuzhiyun  * This file is part of the Emulex Linux Device Driver for         *
3*4882a593Smuzhiyun  * Fibre Channel Host Bus Adapters.                                *
4*4882a593Smuzhiyun  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5*4882a593Smuzhiyun  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6*4882a593Smuzhiyun  * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
7*4882a593Smuzhiyun  * EMULEX and SLI are trademarks of Emulex.                        *
8*4882a593Smuzhiyun  * www.broadcom.com                                                *
9*4882a593Smuzhiyun  *                                                                 *
10*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or   *
11*4882a593Smuzhiyun  * modify it under the terms of version 2 of the GNU General       *
12*4882a593Smuzhiyun  * Public License as published by the Free Software Foundation.    *
13*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful. *
14*4882a593Smuzhiyun  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15*4882a593Smuzhiyun  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17*4882a593Smuzhiyun  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18*4882a593Smuzhiyun  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19*4882a593Smuzhiyun  * more details, a copy of which can be found in the file COPYING  *
20*4882a593Smuzhiyun  * included with this package.                                     *
21*4882a593Smuzhiyun  *******************************************************************/
22*4882a593Smuzhiyun 
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include <scsi/scsi.h>
33*4882a593Smuzhiyun #include <scsi/scsi_host.h>
34*4882a593Smuzhiyun #include <scsi/scsi_transport_fc.h>
35*4882a593Smuzhiyun #include <scsi/scsi_bsg_fc.h>
36*4882a593Smuzhiyun #include <scsi/fc/fc_fs.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #include "lpfc_hw4.h"
39*4882a593Smuzhiyun #include "lpfc_hw.h"
40*4882a593Smuzhiyun #include "lpfc_sli.h"
41*4882a593Smuzhiyun #include "lpfc_sli4.h"
42*4882a593Smuzhiyun #include "lpfc_nl.h"
43*4882a593Smuzhiyun #include "lpfc_bsg.h"
44*4882a593Smuzhiyun #include "lpfc_disc.h"
45*4882a593Smuzhiyun #include "lpfc_scsi.h"
46*4882a593Smuzhiyun #include "lpfc.h"
47*4882a593Smuzhiyun #include "lpfc_logmsg.h"
48*4882a593Smuzhiyun #include "lpfc_crtn.h"
49*4882a593Smuzhiyun #include "lpfc_debugfs.h"
50*4882a593Smuzhiyun #include "lpfc_vport.h"
51*4882a593Smuzhiyun #include "lpfc_version.h"
52*4882a593Smuzhiyun 
/*
 * Tracking object for an application-registered asynchronous event.
 * Lifetime is managed through @kref; a thread waiting for a matching
 * event sleeps on @wq.
 */
struct lpfc_bsg_event {
	struct list_head node;		/* linkage on the per-port event list */
	struct kref kref;		/* reference count for this object */
	wait_queue_head_t wq;		/* waiters sleep here */

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};
74*4882a593Smuzhiyun 
/* Per-job context for a CT/management command iocb */
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;	/* the outstanding command iocb */
	struct lpfc_dmabuf *rmp;	/* reply (inbound) DMA buffer chain */
	struct lpfc_nodelist *ndlp;	/* remote node the command targets */
};
80*4882a593Smuzhiyun 
/* Per-job context for a mailbox command passed through from user space */
struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;	/* mailbox queue entry issued to the HBA */
	MAILBOX_t *mb;		/* mailbox command image */
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};
90*4882a593Smuzhiyun 
/* Fixed destination ID used when addressing Menlo management commands */
#define MENLO_DID 0x0000FC0E

/* Per-job context for a Menlo pass-through command */
struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;	/* the outstanding command iocb */
	struct lpfc_dmabuf *rmp;	/* reply (inbound) DMA buffer chain */
};
97*4882a593Smuzhiyun 
/* Values for bsg_job_data.type, selecting the active context_un member */
#define TYPE_EVT 	1	/* context_un.evt */
#define TYPE_IOCB	2	/* context_un.iocb */
#define TYPE_MBOX	3	/* context_un.mbox */
#define TYPE_MENLO	4	/* context_un.menlo */
/*
 * Driver-private data attached to each in-flight bsg job.  @type is the
 * discriminator telling which member of @context_un is valid.
 */
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};
112*4882a593Smuzhiyun 
/* One received event, queued on an lpfc_bsg_event's to_see/to_get lists */
struct event_data {
	struct list_head node;	/* linkage on the owning event's list */
	uint32_t type;		/* event type code */
	uint32_t immed_dat;	/* immediate data delivered with the event */
	void *data;		/* event payload buffer */
	uint32_t len;		/* payload length in bytes */
};
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun #define BUF_SZ_4K 4096
122*4882a593Smuzhiyun #define SLI_CT_ELX_LOOPBACK 0x10
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun enum ELX_LOOPBACK_CMD {
125*4882a593Smuzhiyun 	ELX_LOOPBACK_XRI_SETUP,
126*4882a593Smuzhiyun 	ELX_LOOPBACK_DATA,
127*4882a593Smuzhiyun };
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun #define ELX_LOOPBACK_HEADER_SZ \
130*4882a593Smuzhiyun 	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun struct lpfc_dmabufext {
133*4882a593Smuzhiyun 	struct lpfc_dmabuf dma;
134*4882a593Smuzhiyun 	uint32_t size;
135*4882a593Smuzhiyun 	uint32_t flag;
136*4882a593Smuzhiyun };
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun static void
lpfc_free_bsg_buffers(struct lpfc_hba * phba,struct lpfc_dmabuf * mlist)139*4882a593Smuzhiyun lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	struct lpfc_dmabuf *mlast, *next_mlast;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	if (mlist) {
144*4882a593Smuzhiyun 		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
145*4882a593Smuzhiyun 					 list) {
146*4882a593Smuzhiyun 			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
147*4882a593Smuzhiyun 			list_del(&mlast->list);
148*4882a593Smuzhiyun 			kfree(mlast);
149*4882a593Smuzhiyun 		}
150*4882a593Smuzhiyun 		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
151*4882a593Smuzhiyun 		kfree(mlist);
152*4882a593Smuzhiyun 	}
153*4882a593Smuzhiyun 	return;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba * phba,unsigned int size,int outbound_buffers,struct ulp_bde64 * bpl,int * bpl_entries)157*4882a593Smuzhiyun lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
158*4882a593Smuzhiyun 		       int outbound_buffers, struct ulp_bde64 *bpl,
159*4882a593Smuzhiyun 		       int *bpl_entries)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun 	struct lpfc_dmabuf *mlist = NULL;
162*4882a593Smuzhiyun 	struct lpfc_dmabuf *mp;
163*4882a593Smuzhiyun 	unsigned int bytes_left = size;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	/* Verify we can support the size specified */
166*4882a593Smuzhiyun 	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
167*4882a593Smuzhiyun 		return NULL;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	/* Determine the number of dma buffers to allocate */
170*4882a593Smuzhiyun 	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
171*4882a593Smuzhiyun 			size/LPFC_BPL_SIZE);
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	/* Allocate dma buffer and place in BPL passed */
174*4882a593Smuzhiyun 	while (bytes_left) {
175*4882a593Smuzhiyun 		/* Allocate dma buffer  */
176*4882a593Smuzhiyun 		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
177*4882a593Smuzhiyun 		if (!mp) {
178*4882a593Smuzhiyun 			if (mlist)
179*4882a593Smuzhiyun 				lpfc_free_bsg_buffers(phba, mlist);
180*4882a593Smuzhiyun 			return NULL;
181*4882a593Smuzhiyun 		}
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 		INIT_LIST_HEAD(&mp->list);
184*4882a593Smuzhiyun 		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 		if (!mp->virt) {
187*4882a593Smuzhiyun 			kfree(mp);
188*4882a593Smuzhiyun 			if (mlist)
189*4882a593Smuzhiyun 				lpfc_free_bsg_buffers(phba, mlist);
190*4882a593Smuzhiyun 			return NULL;
191*4882a593Smuzhiyun 		}
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 		/* Queue it to a linked list */
194*4882a593Smuzhiyun 		if (!mlist)
195*4882a593Smuzhiyun 			mlist = mp;
196*4882a593Smuzhiyun 		else
197*4882a593Smuzhiyun 			list_add_tail(&mp->list, &mlist->list);
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 		/* Add buffer to buffer pointer list */
200*4882a593Smuzhiyun 		if (outbound_buffers)
201*4882a593Smuzhiyun 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
202*4882a593Smuzhiyun 		else
203*4882a593Smuzhiyun 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
204*4882a593Smuzhiyun 		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
205*4882a593Smuzhiyun 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
206*4882a593Smuzhiyun 		bpl->tus.f.bdeSize = (uint16_t)
207*4882a593Smuzhiyun 			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
208*4882a593Smuzhiyun 			 bytes_left);
209*4882a593Smuzhiyun 		bytes_left -= bpl->tus.f.bdeSize;
210*4882a593Smuzhiyun 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
211*4882a593Smuzhiyun 		bpl++;
212*4882a593Smuzhiyun 	}
213*4882a593Smuzhiyun 	return mlist;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun 
/**
 * lpfc_bsg_copy_data - copy between a DMA buffer chain and a bsg payload
 * @dma_buffers: head of the lpfc_dmabuf chain (the head buffer itself
 *               holds data too)
 * @bsg_buffers: bsg scatter-gather payload to copy to or from
 * @bytes_to_transfer: number of bytes requested
 * @to_buffers: non-zero copies sg list -> DMA buffers; zero copies
 *              DMA buffers -> sg list
 *
 * Walks the scatter-gather list with an atomic sg_miter (interrupts
 * disabled for the duration) while stepping through the DMA buffer
 * chain in LPFC_BPL_SIZE slices, copying the overlap of the current
 * sg segment and the current DMA buffer on each pass.
 *
 * Return: number of bytes actually copied (may be less than requested
 * if the sg list is exhausted first).
 **/
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{

	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	/*
	 * Temporarily splice the head buffer onto the same list as its
	 * chain so one list_for_each_entry() visits every buffer,
	 * including the head; the original layout is restored below.
	 */
	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	/* SG_MITER_ATOMIC mapping: keep interrupts off while iterating */
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			/* Clamp to the bytes remaining overall ... */
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			/* ... and to the space left in this DMA buffer */
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			/* Current sg segment fully consumed: advance */
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	/* Restore the head buffer / chain layout the caller passed in */
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
280*4882a593Smuzhiyun 
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;	/* NULL if the job was already aborted */
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Pull back the resources stashed by lpfc_bsg_send_mgmt_cmd */
	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			/* Map the firmware completion status to an errno */
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			/* Success: copy the received data back to the job */
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	/* Free everything allocated by lpfc_bsg_send_mgmt_cmd */
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun /**
384*4882a593Smuzhiyun  * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
385*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
386*4882a593Smuzhiyun  **/
387*4882a593Smuzhiyun static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job * job)388*4882a593Smuzhiyun lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
391*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
392*4882a593Smuzhiyun 	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
393*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp = rdata->pnode;
394*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
395*4882a593Smuzhiyun 	struct ulp_bde64 *bpl = NULL;
396*4882a593Smuzhiyun 	uint32_t timeout;
397*4882a593Smuzhiyun 	struct lpfc_iocbq *cmdiocbq = NULL;
398*4882a593Smuzhiyun 	IOCB_t *cmd;
399*4882a593Smuzhiyun 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
400*4882a593Smuzhiyun 	int request_nseg;
401*4882a593Smuzhiyun 	int reply_nseg;
402*4882a593Smuzhiyun 	struct bsg_job_data *dd_data;
403*4882a593Smuzhiyun 	unsigned long flags;
404*4882a593Smuzhiyun 	uint32_t creg_val;
405*4882a593Smuzhiyun 	int rc = 0;
406*4882a593Smuzhiyun 	int iocb_stat;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	/* in case no data is transferred */
409*4882a593Smuzhiyun 	bsg_reply->reply_payload_rcv_len = 0;
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	/* allocate our bsg tracking structure */
412*4882a593Smuzhiyun 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
413*4882a593Smuzhiyun 	if (!dd_data) {
414*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
415*4882a593Smuzhiyun 				"2733 Failed allocation of dd_data\n");
416*4882a593Smuzhiyun 		rc = -ENOMEM;
417*4882a593Smuzhiyun 		goto no_dd_data;
418*4882a593Smuzhiyun 	}
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 	if (!lpfc_nlp_get(ndlp)) {
421*4882a593Smuzhiyun 		rc = -ENODEV;
422*4882a593Smuzhiyun 		goto no_ndlp;
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
426*4882a593Smuzhiyun 		rc = -ENODEV;
427*4882a593Smuzhiyun 		goto free_ndlp;
428*4882a593Smuzhiyun 	}
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	cmdiocbq = lpfc_sli_get_iocbq(phba);
431*4882a593Smuzhiyun 	if (!cmdiocbq) {
432*4882a593Smuzhiyun 		rc = -ENOMEM;
433*4882a593Smuzhiyun 		goto free_ndlp;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	cmd = &cmdiocbq->iocb;
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
439*4882a593Smuzhiyun 	if (!bmp) {
440*4882a593Smuzhiyun 		rc = -ENOMEM;
441*4882a593Smuzhiyun 		goto free_cmdiocbq;
442*4882a593Smuzhiyun 	}
443*4882a593Smuzhiyun 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
444*4882a593Smuzhiyun 	if (!bmp->virt) {
445*4882a593Smuzhiyun 		rc = -ENOMEM;
446*4882a593Smuzhiyun 		goto free_bmp;
447*4882a593Smuzhiyun 	}
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	INIT_LIST_HEAD(&bmp->list);
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	bpl = (struct ulp_bde64 *) bmp->virt;
452*4882a593Smuzhiyun 	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
453*4882a593Smuzhiyun 	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
454*4882a593Smuzhiyun 				     1, bpl, &request_nseg);
455*4882a593Smuzhiyun 	if (!cmp) {
456*4882a593Smuzhiyun 		rc = -ENOMEM;
457*4882a593Smuzhiyun 		goto free_bmp;
458*4882a593Smuzhiyun 	}
459*4882a593Smuzhiyun 	lpfc_bsg_copy_data(cmp, &job->request_payload,
460*4882a593Smuzhiyun 			   job->request_payload.payload_len, 1);
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	bpl += request_nseg;
463*4882a593Smuzhiyun 	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
464*4882a593Smuzhiyun 	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
465*4882a593Smuzhiyun 				     bpl, &reply_nseg);
466*4882a593Smuzhiyun 	if (!rmp) {
467*4882a593Smuzhiyun 		rc = -ENOMEM;
468*4882a593Smuzhiyun 		goto free_cmp;
469*4882a593Smuzhiyun 	}
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
472*4882a593Smuzhiyun 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
473*4882a593Smuzhiyun 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
474*4882a593Smuzhiyun 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
475*4882a593Smuzhiyun 	cmd->un.genreq64.bdl.bdeSize =
476*4882a593Smuzhiyun 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
477*4882a593Smuzhiyun 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
478*4882a593Smuzhiyun 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
479*4882a593Smuzhiyun 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
480*4882a593Smuzhiyun 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
481*4882a593Smuzhiyun 	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
482*4882a593Smuzhiyun 	cmd->ulpBdeCount = 1;
483*4882a593Smuzhiyun 	cmd->ulpLe = 1;
484*4882a593Smuzhiyun 	cmd->ulpClass = CLASS3;
485*4882a593Smuzhiyun 	cmd->ulpContext = ndlp->nlp_rpi;
486*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4)
487*4882a593Smuzhiyun 		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
488*4882a593Smuzhiyun 	cmd->ulpOwner = OWN_CHIP;
489*4882a593Smuzhiyun 	cmdiocbq->vport = phba->pport;
490*4882a593Smuzhiyun 	cmdiocbq->context3 = bmp;
491*4882a593Smuzhiyun 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
492*4882a593Smuzhiyun 	timeout = phba->fc_ratov * 2;
493*4882a593Smuzhiyun 	cmd->ulpTimeout = timeout;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
496*4882a593Smuzhiyun 	cmdiocbq->context1 = dd_data;
497*4882a593Smuzhiyun 	cmdiocbq->context2 = cmp;
498*4882a593Smuzhiyun 	cmdiocbq->context3 = bmp;
499*4882a593Smuzhiyun 	cmdiocbq->context_un.ndlp = ndlp;
500*4882a593Smuzhiyun 	dd_data->type = TYPE_IOCB;
501*4882a593Smuzhiyun 	dd_data->set_job = job;
502*4882a593Smuzhiyun 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
503*4882a593Smuzhiyun 	dd_data->context_un.iocb.ndlp = ndlp;
504*4882a593Smuzhiyun 	dd_data->context_un.iocb.rmp = rmp;
505*4882a593Smuzhiyun 	job->dd_data = dd_data;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
508*4882a593Smuzhiyun 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
509*4882a593Smuzhiyun 			rc = -EIO ;
510*4882a593Smuzhiyun 			goto free_rmp;
511*4882a593Smuzhiyun 		}
512*4882a593Smuzhiyun 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
513*4882a593Smuzhiyun 		writel(creg_val, phba->HCregaddr);
514*4882a593Smuzhiyun 		readl(phba->HCregaddr); /* flush */
515*4882a593Smuzhiyun 	}
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	if (iocb_stat == IOCB_SUCCESS) {
520*4882a593Smuzhiyun 		spin_lock_irqsave(&phba->hbalock, flags);
521*4882a593Smuzhiyun 		/* make sure the I/O had not been completed yet */
522*4882a593Smuzhiyun 		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
523*4882a593Smuzhiyun 			/* open up abort window to timeout handler */
524*4882a593Smuzhiyun 			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
525*4882a593Smuzhiyun 		}
526*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
527*4882a593Smuzhiyun 		return 0; /* done for now */
528*4882a593Smuzhiyun 	} else if (iocb_stat == IOCB_BUSY) {
529*4882a593Smuzhiyun 		rc = -EAGAIN;
530*4882a593Smuzhiyun 	} else {
531*4882a593Smuzhiyun 		rc = -EIO;
532*4882a593Smuzhiyun 	}
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	/* iocb failed so cleanup */
535*4882a593Smuzhiyun 	job->dd_data = NULL;
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun free_rmp:
538*4882a593Smuzhiyun 	lpfc_free_bsg_buffers(phba, rmp);
539*4882a593Smuzhiyun free_cmp:
540*4882a593Smuzhiyun 	lpfc_free_bsg_buffers(phba, cmp);
541*4882a593Smuzhiyun free_bmp:
542*4882a593Smuzhiyun 	if (bmp->virt)
543*4882a593Smuzhiyun 		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
544*4882a593Smuzhiyun 	kfree(bmp);
545*4882a593Smuzhiyun free_cmdiocbq:
546*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, cmdiocbq);
547*4882a593Smuzhiyun free_ndlp:
548*4882a593Smuzhiyun 	lpfc_nlp_put(ndlp);
549*4882a593Smuzhiyun no_ndlp:
550*4882a593Smuzhiyun 	kfree(dd_data);
551*4882a593Smuzhiyun no_dd_data:
552*4882a593Smuzhiyun 	/* make error code available to userspace */
553*4882a593Smuzhiyun 	bsg_reply->result = rc;
554*4882a593Smuzhiyun 	job->dd_data = NULL;
555*4882a593Smuzhiyun 	return rc;
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun 
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_rport_els_cmp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	/* Restore the ndlp into context1 before lpfc_els_free_iocb() runs */
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;	/* NULL if the job was already aborted */
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	/* The response buffer is chained directly after the command buffer */
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			/* Unpack the four LS_RJT bytes of ulpWord[4] */
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			/* Any other completion status is reported as -EIO */
			rc = -EIO;
		}
	}

	/* Drop the node reference and free the iocb + its buffers */
	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
656*4882a593Smuzhiyun 
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Returns 0 when the ELS iocb was successfully issued (the job is then
 * completed asynchronously by the completion handler); otherwise a
 * negative errno, which is also stored in the job's reply result.
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	/* Take a reference on the node so it cannot be released while the
	 * ELS is outstanding; dropped on the error paths below or by the
	 * completion handler.
	 */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the dma buffers allocated by lpfc_prep_els_iocb for
	 * the command and response, so that if the job times out and the
	 * request is freed, we won't DMA into memory that is no longer
	 * allocated to the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	/* SLI4 ports address the exchange by the driver-assigned physical
	 * RPI mapping; earlier revisions use the RPI directly.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	/* When polling mode has the ring interrupt disabled, re-enable
	 * ring 0 interrupts so the completion can be delivered.
	 */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun /**
793*4882a593Smuzhiyun  * lpfc_bsg_event_free - frees an allocated event structure
794*4882a593Smuzhiyun  * @kref: Pointer to a kref.
795*4882a593Smuzhiyun  *
796*4882a593Smuzhiyun  * Called from kref_put. Back cast the kref into an event structure address.
797*4882a593Smuzhiyun  * Free any events to get, delete associated nodes, free any events to see,
798*4882a593Smuzhiyun  * free any data then free the event itself.
799*4882a593Smuzhiyun  **/
800*4882a593Smuzhiyun static void
lpfc_bsg_event_free(struct kref * kref)801*4882a593Smuzhiyun lpfc_bsg_event_free(struct kref *kref)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
804*4882a593Smuzhiyun 						  kref);
805*4882a593Smuzhiyun 	struct event_data *ed;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	list_del(&evt->node);
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun 	while (!list_empty(&evt->events_to_get)) {
810*4882a593Smuzhiyun 		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
811*4882a593Smuzhiyun 		list_del(&ed->node);
812*4882a593Smuzhiyun 		kfree(ed->data);
813*4882a593Smuzhiyun 		kfree(ed);
814*4882a593Smuzhiyun 	}
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	while (!list_empty(&evt->events_to_see)) {
817*4882a593Smuzhiyun 		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
818*4882a593Smuzhiyun 		list_del(&ed->node);
819*4882a593Smuzhiyun 		kfree(ed->data);
820*4882a593Smuzhiyun 		kfree(ed);
821*4882a593Smuzhiyun 	}
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	kfree(evt->dd_data);
824*4882a593Smuzhiyun 	kfree(evt);
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun /**
828*4882a593Smuzhiyun  * lpfc_bsg_event_ref - increments the kref for an event
829*4882a593Smuzhiyun  * @evt: Pointer to an event structure.
830*4882a593Smuzhiyun  **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	/* Take an extra reference; paired with lpfc_bsg_event_unref() */
	kref_get(&evt->kref);
}
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun /**
838*4882a593Smuzhiyun  * lpfc_bsg_event_unref - Uses kref_put to free an event structure
839*4882a593Smuzhiyun  * @evt: Pointer to an event structure.
840*4882a593Smuzhiyun  **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	/* Drop a reference; frees the event via lpfc_bsg_event_free()
	 * when the count reaches zero.
	 */
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun /**
848*4882a593Smuzhiyun  * lpfc_bsg_event_new - allocate and initialize a event structure
849*4882a593Smuzhiyun  * @ev_mask: Mask of events.
850*4882a593Smuzhiyun  * @ev_reg_id: Event reg id.
851*4882a593Smuzhiyun  * @ev_req_id: Event request id.
852*4882a593Smuzhiyun  **/
853*4882a593Smuzhiyun static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask,int ev_reg_id,uint32_t ev_req_id)854*4882a593Smuzhiyun lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun 	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	if (!evt)
859*4882a593Smuzhiyun 		return NULL;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	INIT_LIST_HEAD(&evt->events_to_get);
862*4882a593Smuzhiyun 	INIT_LIST_HEAD(&evt->events_to_see);
863*4882a593Smuzhiyun 	evt->type_mask = ev_mask;
864*4882a593Smuzhiyun 	evt->req_id = ev_req_id;
865*4882a593Smuzhiyun 	evt->reg_id = ev_reg_id;
866*4882a593Smuzhiyun 	evt->wait_time_stamp = jiffies;
867*4882a593Smuzhiyun 	evt->dd_data = NULL;
868*4882a593Smuzhiyun 	init_waitqueue_head(&evt->wq);
869*4882a593Smuzhiyun 	kref_init(&evt->kref);
870*4882a593Smuzhiyun 	return evt;
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun /**
874*4882a593Smuzhiyun  * diag_cmd_data_free - Frees an lpfc dma buffer extension
875*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
876*4882a593Smuzhiyun  * @mlist: Pointer to an lpfc dma buffer extension.
877*4882a593Smuzhiyun  **/
878*4882a593Smuzhiyun static int
diag_cmd_data_free(struct lpfc_hba * phba,struct lpfc_dmabufext * mlist)879*4882a593Smuzhiyun diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
880*4882a593Smuzhiyun {
881*4882a593Smuzhiyun 	struct lpfc_dmabufext *mlast;
882*4882a593Smuzhiyun 	struct pci_dev *pcidev;
883*4882a593Smuzhiyun 	struct list_head head, *curr, *next;
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	if ((!mlist) || (!lpfc_is_link_up(phba) &&
886*4882a593Smuzhiyun 		(phba->link_flag & LS_LOOPBACK_MODE))) {
887*4882a593Smuzhiyun 		return 0;
888*4882a593Smuzhiyun 	}
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun 	pcidev = phba->pcidev;
891*4882a593Smuzhiyun 	list_add_tail(&head, &mlist->dma.list);
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	list_for_each_safe(curr, next, &head) {
894*4882a593Smuzhiyun 		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
895*4882a593Smuzhiyun 		if (mlast->dma.virt)
896*4882a593Smuzhiyun 			dma_free_coherent(&pcidev->dev,
897*4882a593Smuzhiyun 					  mlast->size,
898*4882a593Smuzhiyun 					  mlast->dma.virt,
899*4882a593Smuzhiyun 					  mlast->dma.phys);
900*4882a593Smuzhiyun 		kfree(mlast);
901*4882a593Smuzhiyun 	}
902*4882a593Smuzhiyun 	return 0;
903*4882a593Smuzhiyun }
904*4882a593Smuzhiyun 
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the SLI ring the CT command was received on.
 * @piocbq: Pointer to the unsolicited receive iocb.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	/* Nothing to process if the iocb carries no receive buffer */
	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	/* Bail out if the HBA is in error or SLI is not active */
	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	/* Locate the first receive buffer: the HBQ-posted buffer directly,
	 * otherwise look it up on the ring by its DMA address.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	/* Deliver the CT payload to every waiter registered for this
	 * request id.  The ct_ev_lock is dropped around allocations and
	 * the job completion; the event reference keeps it alive.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			/* Sum the BDE sizes across the whole sequence */
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		/* Copy the payload out of each buffer of the sequence */
		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					/* HBQ mode: at most two buffers per
					 * iocb, sizes taken from the HBQ
					 * entries embedded in the iocb.
					 */
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					/* clamp to the allocated length */
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq x%px\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				/* Recycle or free the consumed buffer */
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: record the exchange in the circular ct_ctx
			 * array and hand the slot index to userspace via
			 * immed_dat.
			 */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			/* Loopback: leave the event on events_to_see and
			 * wake the waiting diagnostic thread.
			 */
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				       bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun /**
1161*4882a593Smuzhiyun  * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1162*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
1163*4882a593Smuzhiyun  * @dmabuf: pointer to a dmabuf that describes the FC sequence
1164*4882a593Smuzhiyun  *
1165*4882a593Smuzhiyun  * This function handles abort to the CT command toward management plane
1166*4882a593Smuzhiyun  * for SLI4 port.
1167*4882a593Smuzhiyun  *
1168*4882a593Smuzhiyun  * If the pending context of a CT command to management plane present, clears
1169*4882a593Smuzhiyun  * such context and returns 1 for handled; otherwise, it returns 0 indicating
1170*4882a593Smuzhiyun  * no context exists.
1171*4882a593Smuzhiyun  **/
1172*4882a593Smuzhiyun int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba * phba,struct hbq_dmabuf * dmabuf)1173*4882a593Smuzhiyun lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	struct fc_frame_header fc_hdr;
1176*4882a593Smuzhiyun 	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1177*4882a593Smuzhiyun 	int ctx_idx, handled = 0;
1178*4882a593Smuzhiyun 	uint16_t oxid, rxid;
1179*4882a593Smuzhiyun 	uint32_t sid;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1182*4882a593Smuzhiyun 	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1183*4882a593Smuzhiyun 	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1184*4882a593Smuzhiyun 	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1187*4882a593Smuzhiyun 		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1188*4882a593Smuzhiyun 			continue;
1189*4882a593Smuzhiyun 		if (phba->ct_ctx[ctx_idx].rxid != rxid)
1190*4882a593Smuzhiyun 			continue;
1191*4882a593Smuzhiyun 		if (phba->ct_ctx[ctx_idx].oxid != oxid)
1192*4882a593Smuzhiyun 			continue;
1193*4882a593Smuzhiyun 		if (phba->ct_ctx[ctx_idx].SID != sid)
1194*4882a593Smuzhiyun 			continue;
1195*4882a593Smuzhiyun 		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1196*4882a593Smuzhiyun 		handled = 1;
1197*4882a593Smuzhiyun 	}
1198*4882a593Smuzhiyun 	return handled;
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun /**
1202*4882a593Smuzhiyun  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1203*4882a593Smuzhiyun  * @job: SET_EVENT fc_bsg_job
1204*4882a593Smuzhiyun  **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	/* Reject requests too small to hold a set_ct_event payload */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	/* Reuse an existing waiter registered under this reg_id, if any */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* If the loop ran to completion, evt points at the list-head
	 * sentinel, i.e. no matching waiter exists - create one.
	 */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	/* Attach the job to the waiter; it is completed later when a
	 * matching unsolicited CT event arrives.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun /**
1285*4882a593Smuzhiyun  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1286*4882a593Smuzhiyun  * @job: GET_EVENT fc_bsg_job
1287*4882a593Smuzhiyun  **/
1288*4882a593Smuzhiyun static int
lpfc_bsg_hba_get_event(struct bsg_job * job)1289*4882a593Smuzhiyun lpfc_bsg_hba_get_event(struct bsg_job *job)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1292*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
1293*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
1294*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
1295*4882a593Smuzhiyun 	struct get_ct_event *event_req;
1296*4882a593Smuzhiyun 	struct get_ct_event_reply *event_reply;
1297*4882a593Smuzhiyun 	struct lpfc_bsg_event *evt, *evt_next;
1298*4882a593Smuzhiyun 	struct event_data *evt_dat = NULL;
1299*4882a593Smuzhiyun 	unsigned long flags;
1300*4882a593Smuzhiyun 	uint32_t rc = 0;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	if (job->request_len <
1303*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1304*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1305*4882a593Smuzhiyun 				"2613 Received GET_CT_EVENT request below "
1306*4882a593Smuzhiyun 				"minimum size\n");
1307*4882a593Smuzhiyun 		rc = -EINVAL;
1308*4882a593Smuzhiyun 		goto job_error;
1309*4882a593Smuzhiyun 	}
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	event_req = (struct get_ct_event *)
1312*4882a593Smuzhiyun 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	event_reply = (struct get_ct_event_reply *)
1315*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
1316*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1317*4882a593Smuzhiyun 	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1318*4882a593Smuzhiyun 		if (evt->reg_id == event_req->ev_reg_id) {
1319*4882a593Smuzhiyun 			if (list_empty(&evt->events_to_get))
1320*4882a593Smuzhiyun 				break;
1321*4882a593Smuzhiyun 			lpfc_bsg_event_ref(evt);
1322*4882a593Smuzhiyun 			evt->wait_time_stamp = jiffies;
1323*4882a593Smuzhiyun 			evt_dat = list_entry(evt->events_to_get.prev,
1324*4882a593Smuzhiyun 					     struct event_data, node);
1325*4882a593Smuzhiyun 			list_del(&evt_dat->node);
1326*4882a593Smuzhiyun 			break;
1327*4882a593Smuzhiyun 		}
1328*4882a593Smuzhiyun 	}
1329*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	/* The app may continue to ask for event data until it gets
1332*4882a593Smuzhiyun 	 * an error indicating that there isn't anymore
1333*4882a593Smuzhiyun 	 */
1334*4882a593Smuzhiyun 	if (evt_dat == NULL) {
1335*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len = 0;
1336*4882a593Smuzhiyun 		rc = -ENOENT;
1337*4882a593Smuzhiyun 		goto job_error;
1338*4882a593Smuzhiyun 	}
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	if (evt_dat->len > job->request_payload.payload_len) {
1341*4882a593Smuzhiyun 		evt_dat->len = job->request_payload.payload_len;
1342*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1343*4882a593Smuzhiyun 				"2618 Truncated event data at %d "
1344*4882a593Smuzhiyun 				"bytes\n",
1345*4882a593Smuzhiyun 				job->request_payload.payload_len);
1346*4882a593Smuzhiyun 	}
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	event_reply->type = evt_dat->type;
1349*4882a593Smuzhiyun 	event_reply->immed_data = evt_dat->immed_dat;
1350*4882a593Smuzhiyun 	if (evt_dat->len > 0)
1351*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len =
1352*4882a593Smuzhiyun 			sg_copy_from_buffer(job->request_payload.sg_list,
1353*4882a593Smuzhiyun 					    job->request_payload.sg_cnt,
1354*4882a593Smuzhiyun 					    evt_dat->data, evt_dat->len);
1355*4882a593Smuzhiyun 	else
1356*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len = 0;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (evt_dat) {
1359*4882a593Smuzhiyun 		kfree(evt_dat->data);
1360*4882a593Smuzhiyun 		kfree(evt_dat);
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1364*4882a593Smuzhiyun 	lpfc_bsg_event_unref(evt);
1365*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1366*4882a593Smuzhiyun 	job->dd_data = NULL;
1367*4882a593Smuzhiyun 	bsg_reply->result = 0;
1368*4882a593Smuzhiyun 	bsg_job_done(job, bsg_reply->result,
1369*4882a593Smuzhiyun 		       bsg_reply->reply_payload_rcv_len);
1370*4882a593Smuzhiyun 	return 0;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun job_error:
1373*4882a593Smuzhiyun 	job->dd_data = NULL;
1374*4882a593Smuzhiyun 	bsg_reply->result = rc;
1375*4882a593Smuzhiyun 	return rc;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * Completion handler for the XMIT_SEQUENCE64 iocb issued by
 * lpfc_issue_ct_rsp().  Called by the ring event handler without any
 * lock held, from worker-thread or interrupt context.  It detaches the
 * bsg job from the tracking structure (so the timeout handler cannot
 * abort it concurrently), closes the abort window, translates the iocb
 * completion status into an errno, frees all resources attached to the
 * command (payload buffers, BPL, iocbq, dd_data, ndlp reference) and,
 * if the job is still active, completes it via bsg_job_done().
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Recover the resources stashed in the iocb/dd_data contexts */
	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			/* Map firmware local-reject reasons to errnos */
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	/* Unconditionally release everything owned by the command */
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
1471*4882a593Smuzhiyun 
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to the CT response payload buffer list.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 *
 * Builds a CMD_XMIT_SEQUENCE64_CX iocb around the caller-built BPL
 * (@bmp, @num_entry entries describing @cmp) and issues it on the ELS
 * ring.  On SLI4 the original unsolicited exchange (oxid/rxid/SID) is
 * recovered from phba->ct_ctx[@tag]; on SLI3 @tag is used directly as
 * the exchange context.  On success the iocb completes asynchronously
 * through lpfc_issue_ct_rsp_cmp(), which frees all resources; on
 * failure the caller retains ownership of @cmp and @bmp.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for  command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	/* Point the BDL at the caller-built buffer pointer list */
	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		/* Look up the remote node the unsolicited CT came from */
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				 "2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		/* NOTE(review): ndlp was already NULL-checked above, so the
		 * !ndlp test here is redundant - kept as-is.
		 */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a refernece count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	/* Wire up the completion path and the bsg tracking structure */
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	/* When polling is enabled, re-enable FCP ring interrupts so the
	 * completion can be delivered.
	 */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

	/* NOTE(review): failure paths reached after lpfc_nlp_get()
	 * succeeded do not appear to lpfc_nlp_put() the reference -
	 * verify the node refcount stays balanced on error.
	 */
issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun /**
1622*4882a593Smuzhiyun  * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1623*4882a593Smuzhiyun  * @job: SEND_MGMT_RESP fc_bsg_job
1624*4882a593Smuzhiyun  **/
1625*4882a593Smuzhiyun static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job * job)1626*4882a593Smuzhiyun lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1629*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
1630*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
1631*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
1632*4882a593Smuzhiyun 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1633*4882a593Smuzhiyun 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1634*4882a593Smuzhiyun 	struct ulp_bde64 *bpl;
1635*4882a593Smuzhiyun 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1636*4882a593Smuzhiyun 	int bpl_entries;
1637*4882a593Smuzhiyun 	uint32_t tag = mgmt_resp->tag;
1638*4882a593Smuzhiyun 	unsigned long reqbfrcnt =
1639*4882a593Smuzhiyun 			(unsigned long)job->request_payload.payload_len;
1640*4882a593Smuzhiyun 	int rc = 0;
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	/* in case no data is transferred */
1643*4882a593Smuzhiyun 	bsg_reply->reply_payload_rcv_len = 0;
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1646*4882a593Smuzhiyun 		rc = -ERANGE;
1647*4882a593Smuzhiyun 		goto send_mgmt_rsp_exit;
1648*4882a593Smuzhiyun 	}
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1651*4882a593Smuzhiyun 	if (!bmp) {
1652*4882a593Smuzhiyun 		rc = -ENOMEM;
1653*4882a593Smuzhiyun 		goto send_mgmt_rsp_exit;
1654*4882a593Smuzhiyun 	}
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1657*4882a593Smuzhiyun 	if (!bmp->virt) {
1658*4882a593Smuzhiyun 		rc = -ENOMEM;
1659*4882a593Smuzhiyun 		goto send_mgmt_rsp_free_bmp;
1660*4882a593Smuzhiyun 	}
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 	INIT_LIST_HEAD(&bmp->list);
1663*4882a593Smuzhiyun 	bpl = (struct ulp_bde64 *) bmp->virt;
1664*4882a593Smuzhiyun 	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1665*4882a593Smuzhiyun 	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1666*4882a593Smuzhiyun 				     1, bpl, &bpl_entries);
1667*4882a593Smuzhiyun 	if (!cmp) {
1668*4882a593Smuzhiyun 		rc = -ENOMEM;
1669*4882a593Smuzhiyun 		goto send_mgmt_rsp_free_bmp;
1670*4882a593Smuzhiyun 	}
1671*4882a593Smuzhiyun 	lpfc_bsg_copy_data(cmp, &job->request_payload,
1672*4882a593Smuzhiyun 			   job->request_payload.payload_len, 1);
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	if (rc == IOCB_SUCCESS)
1677*4882a593Smuzhiyun 		return 0; /* done for now */
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	rc = -EACCES;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	lpfc_free_bsg_buffers(phba, cmp);
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun send_mgmt_rsp_free_bmp:
1684*4882a593Smuzhiyun 	if (bmp->virt)
1685*4882a593Smuzhiyun 		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1686*4882a593Smuzhiyun 	kfree(bmp);
1687*4882a593Smuzhiyun send_mgmt_rsp_exit:
1688*4882a593Smuzhiyun 	/* make error code available to userspace */
1689*4882a593Smuzhiyun 	bsg_reply->result = rc;
1690*4882a593Smuzhiyun 	job->dd_data = NULL;
1691*4882a593Smuzhiyun 	return rc;
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun 
/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing driver for diag loopback
 * on device.  It refuses to proceed while the HBA is errored, mgmt I/O
 * is blocked, or SLI is inactive; otherwise it blocks new scsi requests
 * on every vport and waits for the FCP txcmplq(s) to drain.
 *
 * Returns 0 on success, -ENODEV or -EACCES when the adapter state does
 * not allow entering diag mode.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;


	/* Diag mode requires a healthy, active SLI with mgmt I/O allowed */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	/* Block new scsi requests on all vports; fall back to just the
	 * physical port when the vport work array cannot be allocated.
	 */
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	/* SLI3: a single FCP ring to drain */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	/* SLI4: drain the txcmplq of every FCP work queue.
	 * NOTE(review): a failed lpfc_emptyq_wait() only breaks out of the
	 * loop and the function still returns 0 - confirm intentional.
	 */
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun /**
1750*4882a593Smuzhiyun  * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1751*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
1752*4882a593Smuzhiyun  *
1753*4882a593Smuzhiyun  * This function is responsible for driver exit processing of setting up
1754*4882a593Smuzhiyun  * diag loopback mode on device.
1755*4882a593Smuzhiyun  */
1756*4882a593Smuzhiyun static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba * phba)1757*4882a593Smuzhiyun lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun 	struct Scsi_Host *shost;
1760*4882a593Smuzhiyun 	struct lpfc_vport **vports;
1761*4882a593Smuzhiyun 	int i;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	vports = lpfc_create_vport_work_array(phba);
1764*4882a593Smuzhiyun 	if (vports) {
1765*4882a593Smuzhiyun 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1766*4882a593Smuzhiyun 			shost = lpfc_shost_from_vport(vports[i]);
1767*4882a593Smuzhiyun 			scsi_unblock_requests(shost);
1768*4882a593Smuzhiyun 		}
1769*4882a593Smuzhiyun 		lpfc_destroy_vport_work_array(phba, vports);
1770*4882a593Smuzhiyun 	} else {
1771*4882a593Smuzhiyun 		shost = lpfc_shost_from_vport(phba->pport);
1772*4882a593Smuzhiyun 		scsi_unblock_requests(shost);
1773*4882a593Smuzhiyun 	}
1774*4882a593Smuzhiyun 	return;
1775*4882a593Smuzhiyun }
1776*4882a593Smuzhiyun 
/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3  port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link is
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 *
 * Sequence: MBX_DOWN_LINK, poll for LPFC_LINK_DOWN, then MBX_INIT_LINK
 * with the requested loopback flags and poll for LPFC_HBA_READY.
 * Returns 0 on success or a negative errno; on success the job is also
 * completed via bsg_job_done().
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq  = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	/* converted to 10ms poll iterations; user timeout presumably in
	 * seconds - TODO confirm against the ioctl documentation
	 */
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		/* Reuse the mailbox to re-init the link in loopback mode */
		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
1904*4882a593Smuzhiyun 
/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for set link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 *
 * Return: 0 on success, -ENOMEM on allocation/config failure, -ENODEV if the
 * mailbox command fails or completes with a non-zero status.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	/* Payload length excludes the common sli4 config header. */
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		/* Request could not be embedded in the mailbox command. */
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	/* Identify the physical link, then select diag vs. normal state. */
	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	/* On MBX_TIMEOUT the SLI layer retains ownership of the mailbox
	 * and will free it when the command eventually completes.
	 */
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}
1966*4882a593Smuzhiyun 
/**
 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 * @mode: loopback mode to set
 * @link_no: link number for loopback mode to set
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up loopback diagnostic for a link.
 *
 * Return: 0 on success, -ENOMEM on allocation/config failure, -ENODEV if the
 * mailbox command fails or completes with a non-zero status.
 */
static int
lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
				uint32_t link_no)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	/* Payload length excludes the common sli4 config header. */
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, link_no);

	/* If this link is part of a configured trunk, report the trunked
	 * link type; otherwise use the port's discovered link type.
	 */
	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
	} else {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req,
		       phba->sli4_hba.lnk_info.lnk_tp);
	}

	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       mode);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	/* On MBX_TIMEOUT the SLI layer retains ownership and frees the
	 * mailbox when the command eventually completes.
	 */
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun /**
2027*4882a593Smuzhiyun  * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
2028*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
2029*4882a593Smuzhiyun  *
2030*4882a593Smuzhiyun  * This function set up SLI4 FC port registrations for diagnostic run, which
2031*4882a593Smuzhiyun  * includes all the rpis, vfi, and also vpi.
2032*4882a593Smuzhiyun  */
2033*4882a593Smuzhiyun static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba * phba)2034*4882a593Smuzhiyun lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
2035*4882a593Smuzhiyun {
2036*4882a593Smuzhiyun 	int rc;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2039*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2040*4882a593Smuzhiyun 				"3136 Port still had vfi registered: "
2041*4882a593Smuzhiyun 				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2042*4882a593Smuzhiyun 				phba->pport->fc_myDID, phba->fcf.fcfi,
2043*4882a593Smuzhiyun 				phba->sli4_hba.vfi_ids[phba->pport->vfi],
2044*4882a593Smuzhiyun 				phba->vpi_ids[phba->pport->vpi]);
2045*4882a593Smuzhiyun 		return -EINVAL;
2046*4882a593Smuzhiyun 	}
2047*4882a593Smuzhiyun 	rc = lpfc_issue_reg_vfi(phba->pport);
2048*4882a593Smuzhiyun 	return rc;
2049*4882a593Smuzhiyun }
2050*4882a593Smuzhiyun 
/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 *
 * Sequence: validate request, enter diag mode, selectively reset the port,
 * set the link diag state, issue the loopback-mode mailbox command for the
 * requested loopback type, register port resources, and wait for the port
 * to come ready. On any failure, loopback mode is cleared again.
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout, link_no;
	int i, rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_done;
	}

	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	/* User timeout is in seconds; the wait loops below poll every
	 * 10 ms, so scale to a poll-iteration count.
	 */
	timeout = loopback_mode->timeout * 100;

	/* physical_link == -1 means "use the port's own link number" */
	if (loopback_mode->physical_link == -1)
		link_no = phba->sli4_hba.lnk_info.lnk_no;
	else
		link_no = loopback_mode->physical_link;

	if (link_flags == DISABLE_LOOP_BACK) {
		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
					link_no);
		if (!rc) {
			/* Unset the need disable bit */
			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
		}
		goto job_done;
	} else {
		/* Check if we need to disable the loopback state */
		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
			rc = -EPERM;
			goto job_done;
		}
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_done;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_done;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	/* For each mode, the trunked and untrunked cases are handled
	 * separately; a trunk configuration that does not include this
	 * link is rejected outright.
	 */
	switch (link_flags) {
	case INTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		}

		if (!rc) {
			/* Set the need disable bit */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	case EXTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
				link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
						link_no);
		}

		if (!rc) {
			/* Set the need disable bit */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	default:
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_done:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun /**
2271*4882a593Smuzhiyun  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2272*4882a593Smuzhiyun  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2273*4882a593Smuzhiyun  *
2274*4882a593Smuzhiyun  * This function is responsible for responding to check and dispatch bsg diag
2275*4882a593Smuzhiyun  * command from the user to proper driver action routines.
2276*4882a593Smuzhiyun  */
2277*4882a593Smuzhiyun static int
lpfc_bsg_diag_loopback_mode(struct bsg_job * job)2278*4882a593Smuzhiyun lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2279*4882a593Smuzhiyun {
2280*4882a593Smuzhiyun 	struct Scsi_Host *shost;
2281*4882a593Smuzhiyun 	struct lpfc_vport *vport;
2282*4882a593Smuzhiyun 	struct lpfc_hba *phba;
2283*4882a593Smuzhiyun 	int rc;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	shost = fc_bsg_to_shost(job);
2286*4882a593Smuzhiyun 	if (!shost)
2287*4882a593Smuzhiyun 		return -ENODEV;
2288*4882a593Smuzhiyun 	vport = shost_priv(shost);
2289*4882a593Smuzhiyun 	if (!vport)
2290*4882a593Smuzhiyun 		return -ENODEV;
2291*4882a593Smuzhiyun 	phba = vport->phba;
2292*4882a593Smuzhiyun 	if (!phba)
2293*4882a593Smuzhiyun 		return -ENODEV;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	if (phba->sli_rev < LPFC_SLI_REV4)
2296*4882a593Smuzhiyun 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2297*4882a593Smuzhiyun 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2298*4882a593Smuzhiyun 		 LPFC_SLI_INTF_IF_TYPE_2)
2299*4882a593Smuzhiyun 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2300*4882a593Smuzhiyun 	else
2301*4882a593Smuzhiyun 		rc = -ENODEV;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	return rc;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun 
/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function is responsible for responding to check and dispatch bsg diag
 * command from the user to proper driver action routines.
 *
 * It clears loopback mode, returns the link to normal (non-diag) state,
 * waits for link down, then selectively resets the port to drop the
 * diagnostic port registrations.
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	/* Only SLI-4 ports with if_type >= 2 support this command. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
			bsg_request->rqst_data.h_vendor.vendor_cmd;
	/* User timeout is in seconds; the wait loop polls every 10 ms. */
	timeout = loopback_mode_end_cmd->timeout * 100;

	/* Take the link out of diagnostic state (diag = 0). */
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	/* Drop the fake DID (1) that was set for the loopback test. */
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun /**
2386*4882a593Smuzhiyun  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2387*4882a593Smuzhiyun  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2388*4882a593Smuzhiyun  *
2389*4882a593Smuzhiyun  * This function is to perform SLI4 diag link test request from the user
2390*4882a593Smuzhiyun  * applicaiton.
2391*4882a593Smuzhiyun  */
2392*4882a593Smuzhiyun static int
lpfc_sli4_bsg_link_diag_test(struct bsg_job * job)2393*4882a593Smuzhiyun lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2394*4882a593Smuzhiyun {
2395*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
2396*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
2397*4882a593Smuzhiyun 	struct Scsi_Host *shost;
2398*4882a593Smuzhiyun 	struct lpfc_vport *vport;
2399*4882a593Smuzhiyun 	struct lpfc_hba *phba;
2400*4882a593Smuzhiyun 	LPFC_MBOXQ_t *pmboxq;
2401*4882a593Smuzhiyun 	struct sli4_link_diag *link_diag_test_cmd;
2402*4882a593Smuzhiyun 	uint32_t req_len, alloc_len;
2403*4882a593Smuzhiyun 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2404*4882a593Smuzhiyun 	union lpfc_sli4_cfg_shdr *shdr;
2405*4882a593Smuzhiyun 	uint32_t shdr_status, shdr_add_status;
2406*4882a593Smuzhiyun 	struct diag_status *diag_status_reply;
2407*4882a593Smuzhiyun 	int mbxstatus, rc = -ENODEV, rc1 = 0;
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	shost = fc_bsg_to_shost(job);
2410*4882a593Smuzhiyun 	if (!shost)
2411*4882a593Smuzhiyun 		goto job_error;
2412*4882a593Smuzhiyun 
2413*4882a593Smuzhiyun 	vport = shost_priv(shost);
2414*4882a593Smuzhiyun 	if (!vport)
2415*4882a593Smuzhiyun 		goto job_error;
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	phba = vport->phba;
2418*4882a593Smuzhiyun 	if (!phba)
2419*4882a593Smuzhiyun 		goto job_error;
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	if (phba->sli_rev < LPFC_SLI_REV4)
2423*4882a593Smuzhiyun 		goto job_error;
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2426*4882a593Smuzhiyun 	    LPFC_SLI_INTF_IF_TYPE_2)
2427*4882a593Smuzhiyun 		goto job_error;
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	if (job->request_len < sizeof(struct fc_bsg_request) +
2430*4882a593Smuzhiyun 	    sizeof(struct sli4_link_diag)) {
2431*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2432*4882a593Smuzhiyun 				"3013 Received LINK DIAG TEST request "
2433*4882a593Smuzhiyun 				" size:%d below the minimum size:%d\n",
2434*4882a593Smuzhiyun 				job->request_len,
2435*4882a593Smuzhiyun 				(int)(sizeof(struct fc_bsg_request) +
2436*4882a593Smuzhiyun 				sizeof(struct sli4_link_diag)));
2437*4882a593Smuzhiyun 		rc = -EINVAL;
2438*4882a593Smuzhiyun 		goto job_error;
2439*4882a593Smuzhiyun 	}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	rc = lpfc_bsg_diag_mode_enter(phba);
2442*4882a593Smuzhiyun 	if (rc)
2443*4882a593Smuzhiyun 		goto job_error;
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	link_diag_test_cmd = (struct sli4_link_diag *)
2446*4882a593Smuzhiyun 			 bsg_request->rqst_data.h_vendor.vendor_cmd;
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 	if (rc)
2451*4882a593Smuzhiyun 		goto job_error;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2454*4882a593Smuzhiyun 	if (!pmboxq)
2455*4882a593Smuzhiyun 		goto link_diag_test_exit;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2458*4882a593Smuzhiyun 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2459*4882a593Smuzhiyun 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2460*4882a593Smuzhiyun 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2461*4882a593Smuzhiyun 				     req_len, LPFC_SLI4_MBX_EMBED);
2462*4882a593Smuzhiyun 	if (alloc_len != req_len) {
2463*4882a593Smuzhiyun 		rc = -ENOMEM;
2464*4882a593Smuzhiyun 		goto link_diag_test_exit;
2465*4882a593Smuzhiyun 	}
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2468*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2469*4882a593Smuzhiyun 	       phba->sli4_hba.lnk_info.lnk_no);
2470*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2471*4882a593Smuzhiyun 	       phba->sli4_hba.lnk_info.lnk_tp);
2472*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2473*4882a593Smuzhiyun 	       link_diag_test_cmd->test_id);
2474*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2475*4882a593Smuzhiyun 	       link_diag_test_cmd->loops);
2476*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2477*4882a593Smuzhiyun 	       link_diag_test_cmd->test_version);
2478*4882a593Smuzhiyun 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2479*4882a593Smuzhiyun 	       link_diag_test_cmd->error_action);
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	shdr = (union lpfc_sli4_cfg_shdr *)
2484*4882a593Smuzhiyun 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2485*4882a593Smuzhiyun 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2486*4882a593Smuzhiyun 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2487*4882a593Smuzhiyun 	if (shdr_status || shdr_add_status || mbxstatus) {
2488*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2489*4882a593Smuzhiyun 				"3010 Run link diag test mailbox failed with "
2490*4882a593Smuzhiyun 				"mbx_status x%x status x%x, add_status x%x\n",
2491*4882a593Smuzhiyun 				mbxstatus, shdr_status, shdr_add_status);
2492*4882a593Smuzhiyun 	}
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 	diag_status_reply = (struct diag_status *)
2495*4882a593Smuzhiyun 			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2498*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2499*4882a593Smuzhiyun 				"3012 Received Run link diag test reply "
2500*4882a593Smuzhiyun 				"below minimum size (%d): reply_len:%d\n",
2501*4882a593Smuzhiyun 				(int)(sizeof(*bsg_reply) +
2502*4882a593Smuzhiyun 				sizeof(*diag_status_reply)),
2503*4882a593Smuzhiyun 				job->reply_len);
2504*4882a593Smuzhiyun 		rc = -EINVAL;
2505*4882a593Smuzhiyun 		goto job_error;
2506*4882a593Smuzhiyun 	}
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	diag_status_reply->mbox_status = mbxstatus;
2509*4882a593Smuzhiyun 	diag_status_reply->shdr_status = shdr_status;
2510*4882a593Smuzhiyun 	diag_status_reply->shdr_add_status = shdr_add_status;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun link_diag_test_exit:
2513*4882a593Smuzhiyun 	rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 	if (pmboxq)
2516*4882a593Smuzhiyun 		mempool_free(pmboxq, phba->mbox_mem_pool);
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun 	lpfc_bsg_diag_mode_exit(phba);
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun job_error:
2521*4882a593Smuzhiyun 	/* make error code available to userspace */
2522*4882a593Smuzhiyun 	if (rc1 && !rc)
2523*4882a593Smuzhiyun 		rc = rc1;
2524*4882a593Smuzhiyun 	bsg_reply->result = rc;
2525*4882a593Smuzhiyun 	/* complete the job back to userspace if no error */
2526*4882a593Smuzhiyun 	if (rc == 0)
2527*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
2528*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
2529*4882a593Smuzhiyun 	return rc;
2530*4882a593Smuzhiyun }
2531*4882a593Smuzhiyun 
/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function registers the local port with itself (fc_myDID) so the
 * diag loopback test can send and receive its own unsolicited CT command.
 *
 * Return: 0 on success with *@rpi holding the registered login id;
 * -ENOMEM on allocation/build failure, -EBUSY when no SLI4 rpi is
 * available, -ENODEV when the REG_LOGIN mailbox fails.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* SLI3 uses the caller-provided rpi; SLI4 must allocate one first */
	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	else {
		*rpi = lpfc_sli4_alloc_rpi(phba);
		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EBUSY;
		}
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	/* Detach the service-params DMA buffer from the mailbox so the
	 * completion path cannot free it; this function owns it and
	 * releases it on every exit path below.
	 */
	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		/* On MBX_TIMEOUT the mailbox is NOT returned to the pool —
		 * presumably a late completion may still reference it
		 * (standard lpfc practice; confirm against other callers).
		 */
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	/* SLI3: the adapter returns the assigned rpi in the mailbox */
	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun /**
2598*4882a593Smuzhiyun  * lpfcdiag_loop_self_unreg - unregs from the rpi
2599*4882a593Smuzhiyun  * @phba: Pointer to HBA context object
2600*4882a593Smuzhiyun  * @rpi: Remote port login id
2601*4882a593Smuzhiyun  *
2602*4882a593Smuzhiyun  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2603*4882a593Smuzhiyun  **/
lpfcdiag_loop_self_unreg(struct lpfc_hba * phba,uint16_t rpi)2604*4882a593Smuzhiyun static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2605*4882a593Smuzhiyun {
2606*4882a593Smuzhiyun 	LPFC_MBOXQ_t *mbox;
2607*4882a593Smuzhiyun 	int status;
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	/* Allocate mboxq structure */
2610*4882a593Smuzhiyun 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2611*4882a593Smuzhiyun 	if (mbox == NULL)
2612*4882a593Smuzhiyun 		return -ENOMEM;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	if (phba->sli_rev < LPFC_SLI_REV4)
2615*4882a593Smuzhiyun 		lpfc_unreg_login(phba, 0, rpi, mbox);
2616*4882a593Smuzhiyun 	else
2617*4882a593Smuzhiyun 		lpfc_unreg_login(phba, phba->pport->vpi,
2618*4882a593Smuzhiyun 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2623*4882a593Smuzhiyun 		if (status != MBX_TIMEOUT)
2624*4882a593Smuzhiyun 			mempool_free(mbox, phba->mbox_mem_pool);
2625*4882a593Smuzhiyun 		return -EIO;
2626*4882a593Smuzhiyun 	}
2627*4882a593Smuzhiyun 	mempool_free(mbox, phba->mbox_mem_pool);
2628*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4)
2629*4882a593Smuzhiyun 		lpfc_sli4_free_rpi(phba, rpi);
2630*4882a593Smuzhiyun 	return 0;
2631*4882a593Smuzhiyun }
2632*4882a593Smuzhiyun 
/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and
 * CmdRsp are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 *
 * Return: 0 on success with *@txxri/*@rxxri filled in; -ENOMEM on
 * allocation failure, -EIO if the iocb fails, -EINTR/-ETIMEDOUT while
 * waiting for the loopback event.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t * rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	/* Register an event waiter so the unsolicited-CT handler can hand
	 * us back the receive exchange id.
	 */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	/* One mbuf holds the BPL followed immediately by the CT request;
	 * the single BDE points just past the BPL entry itself.
	 */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
					sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
					sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	/* All-or-nothing: bail if any of the allocations above failed */
	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
		dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	/* Build the special ELX loopback XRI-setup CT request */
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;


	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	/* Transmit xri comes back in the response iocb context field */
	*txxri =  rsp->ulpContext;

	/* Receive xri arrives via the unsolicited-CT event we registered */
	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		/* time_left == 0 means the wait timed out; otherwise a
		 * signal interrupted it.
		 */
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	/* Drop both references: the waiter-list ref and the creation ref */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	/* On IOCB_TIMEDOUT the iocb still belongs to the hardware, so it
	 * must not be released here.
	 */
	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun /**
2782*4882a593Smuzhiyun  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2783*4882a593Smuzhiyun  * @phba: Pointer to HBA context object
2784*4882a593Smuzhiyun  *
2785*4882a593Smuzhiyun  * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
2786*4882a593Smuzhiyun  * returns the pointer to the buffer.
2787*4882a593Smuzhiyun  **/
2788*4882a593Smuzhiyun static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba * phba)2789*4882a593Smuzhiyun lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2790*4882a593Smuzhiyun {
2791*4882a593Smuzhiyun 	struct lpfc_dmabuf *dmabuf;
2792*4882a593Smuzhiyun 	struct pci_dev *pcidev = phba->pcidev;
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun 	/* allocate dma buffer struct */
2795*4882a593Smuzhiyun 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2796*4882a593Smuzhiyun 	if (!dmabuf)
2797*4882a593Smuzhiyun 		return NULL;
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dmabuf->list);
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	/* now, allocate dma buffer */
2802*4882a593Smuzhiyun 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2803*4882a593Smuzhiyun 					  &(dmabuf->phys), GFP_KERNEL);
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 	if (!dmabuf->virt) {
2806*4882a593Smuzhiyun 		kfree(dmabuf);
2807*4882a593Smuzhiyun 		return NULL;
2808*4882a593Smuzhiyun 	}
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 	return dmabuf;
2811*4882a593Smuzhiyun }
2812*4882a593Smuzhiyun 
2813*4882a593Smuzhiyun /**
2814*4882a593Smuzhiyun  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2815*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
2816*4882a593Smuzhiyun  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2817*4882a593Smuzhiyun  *
2818*4882a593Smuzhiyun  * This routine just simply frees a dma buffer and its associated buffer
2819*4882a593Smuzhiyun  * descriptor referred by @dmabuf.
2820*4882a593Smuzhiyun  **/
2821*4882a593Smuzhiyun static void
lpfc_bsg_dma_page_free(struct lpfc_hba * phba,struct lpfc_dmabuf * dmabuf)2822*4882a593Smuzhiyun lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun 	struct pci_dev *pcidev = phba->pcidev;
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	if (!dmabuf)
2827*4882a593Smuzhiyun 		return;
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 	if (dmabuf->virt)
2830*4882a593Smuzhiyun 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2831*4882a593Smuzhiyun 				  dmabuf->virt, dmabuf->phys);
2832*4882a593Smuzhiyun 	kfree(dmabuf);
2833*4882a593Smuzhiyun 	return;
2834*4882a593Smuzhiyun }
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun /**
2837*4882a593Smuzhiyun  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2838*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
2839*4882a593Smuzhiyun  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2840*4882a593Smuzhiyun  *
2841*4882a593Smuzhiyun  * This routine just simply frees all dma buffers and their associated buffer
2842*4882a593Smuzhiyun  * descriptors referred by @dmabuf_list.
2843*4882a593Smuzhiyun  **/
2844*4882a593Smuzhiyun static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba * phba,struct list_head * dmabuf_list)2845*4882a593Smuzhiyun lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2846*4882a593Smuzhiyun 			    struct list_head *dmabuf_list)
2847*4882a593Smuzhiyun {
2848*4882a593Smuzhiyun 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	if (list_empty(dmabuf_list))
2851*4882a593Smuzhiyun 		return;
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2854*4882a593Smuzhiyun 		list_del_init(&dmabuf->list);
2855*4882a593Smuzhiyun 		lpfc_bsg_dma_page_free(phba, dmabuf);
2856*4882a593Smuzhiyun 	}
2857*4882a593Smuzhiyun 	return;
2858*4882a593Smuzhiyun }
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun /**
2861*4882a593Smuzhiyun  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2862*4882a593Smuzhiyun  * @phba: Pointer to HBA context object
2863*4882a593Smuzhiyun  * @bpl: Pointer to 64 bit bde structure
2864*4882a593Smuzhiyun  * @size: Number of bytes to process
2865*4882a593Smuzhiyun  * @nocopydata: Flag to copy user data into the allocated buffer
2866*4882a593Smuzhiyun  *
2867*4882a593Smuzhiyun  * This function allocates page size buffers and populates an lpfc_dmabufext.
2868*4882a593Smuzhiyun  * If allowed the user data pointed to with indataptr is copied into the kernel
2869*4882a593Smuzhiyun  * memory. The chained list of page size buffers is returned.
2870*4882a593Smuzhiyun  **/
2871*4882a593Smuzhiyun static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba * phba,struct ulp_bde64 * bpl,uint32_t size,int nocopydata)2872*4882a593Smuzhiyun diag_cmd_data_alloc(struct lpfc_hba *phba,
2873*4882a593Smuzhiyun 		   struct ulp_bde64 *bpl, uint32_t size,
2874*4882a593Smuzhiyun 		   int nocopydata)
2875*4882a593Smuzhiyun {
2876*4882a593Smuzhiyun 	struct lpfc_dmabufext *mlist = NULL;
2877*4882a593Smuzhiyun 	struct lpfc_dmabufext *dmp;
2878*4882a593Smuzhiyun 	int cnt, offset = 0, i = 0;
2879*4882a593Smuzhiyun 	struct pci_dev *pcidev;
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	pcidev = phba->pcidev;
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 	while (size) {
2884*4882a593Smuzhiyun 		/* We get chunks of 4K */
2885*4882a593Smuzhiyun 		if (size > BUF_SZ_4K)
2886*4882a593Smuzhiyun 			cnt = BUF_SZ_4K;
2887*4882a593Smuzhiyun 		else
2888*4882a593Smuzhiyun 			cnt = size;
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun 		/* allocate struct lpfc_dmabufext buffer header */
2891*4882a593Smuzhiyun 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2892*4882a593Smuzhiyun 		if (!dmp)
2893*4882a593Smuzhiyun 			goto out;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 		INIT_LIST_HEAD(&dmp->dma.list);
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 		/* Queue it to a linked list */
2898*4882a593Smuzhiyun 		if (mlist)
2899*4882a593Smuzhiyun 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2900*4882a593Smuzhiyun 		else
2901*4882a593Smuzhiyun 			mlist = dmp;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 		/* allocate buffer */
2904*4882a593Smuzhiyun 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2905*4882a593Smuzhiyun 						   cnt,
2906*4882a593Smuzhiyun 						   &(dmp->dma.phys),
2907*4882a593Smuzhiyun 						   GFP_KERNEL);
2908*4882a593Smuzhiyun 
2909*4882a593Smuzhiyun 		if (!dmp->dma.virt)
2910*4882a593Smuzhiyun 			goto out;
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 		dmp->size = cnt;
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 		if (nocopydata) {
2915*4882a593Smuzhiyun 			bpl->tus.f.bdeFlags = 0;
2916*4882a593Smuzhiyun 		} else {
2917*4882a593Smuzhiyun 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2918*4882a593Smuzhiyun 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2919*4882a593Smuzhiyun 		}
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 		/* build buffer ptr list for IOCB */
2922*4882a593Smuzhiyun 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2923*4882a593Smuzhiyun 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2924*4882a593Smuzhiyun 		bpl->tus.f.bdeSize = (ushort) cnt;
2925*4882a593Smuzhiyun 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2926*4882a593Smuzhiyun 		bpl++;
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 		i++;
2929*4882a593Smuzhiyun 		offset += cnt;
2930*4882a593Smuzhiyun 		size -= cnt;
2931*4882a593Smuzhiyun 	}
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 	if (mlist) {
2934*4882a593Smuzhiyun 		mlist->flag = i;
2935*4882a593Smuzhiyun 		return mlist;
2936*4882a593Smuzhiyun 	}
2937*4882a593Smuzhiyun out:
2938*4882a593Smuzhiyun 	diag_cmd_data_free(phba, mlist);
2939*4882a593Smuzhiyun 	return NULL;
2940*4882a593Smuzhiyun }
2941*4882a593Smuzhiyun 
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO if posting
 * a buffer iocb fails.
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
			     size_t len)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	pring = lpfc_phba_elsring(phba);

	/* rxbmp holds the BPL; rxbuffer is the chained 4K receive chunks
	 * described by that BPL.
	 */
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	/* Walk the buffer chain via a temporary head, consuming entries
	 * as they are handed to the hardware.
	 */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* HBQ path: one tagged buffer per QUE_XRI64_CX iocb */
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			/* Non-HBQ path: batch up to two BDEs per
			 * QUE_XRI_BUF64_CX iocb before issuing.
			 */
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			diag_cmd_data_free(phba,
				(struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					  (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		/* Hand the posted buffers to the ring's postbuf tracking */
		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
3075*4882a593Smuzhiyun 
3076*4882a593Smuzhiyun /**
3077*4882a593Smuzhiyun  * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3078*4882a593Smuzhiyun  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3079*4882a593Smuzhiyun  *
3080*4882a593Smuzhiyun  * This function receives a user data buffer to be transmitted and received on
3081*4882a593Smuzhiyun  * the same port, the link must be up and in loopback mode prior
3082*4882a593Smuzhiyun  * to being called.
3083*4882a593Smuzhiyun  * 1. A kernel buffer is allocated to copy the user data into.
3084*4882a593Smuzhiyun  * 2. The port registers with "itself".
3085*4882a593Smuzhiyun  * 3. The transmit and receive exchange ids are obtained.
3086*4882a593Smuzhiyun  * 4. The receive exchange id is posted.
3087*4882a593Smuzhiyun  * 5. A new els loopback event is created.
3088*4882a593Smuzhiyun  * 6. The command and response iocbs are allocated.
3089*4882a593Smuzhiyun  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
3090*4882a593Smuzhiyun  *
3091*4882a593Smuzhiyun  * This function is meant to be called n times while the port is in loopback
3092*4882a593Smuzhiyun  * so it is the apps responsibility to issue a reset to take the port out
3093*4882a593Smuzhiyun  * of loopback mode.
3094*4882a593Smuzhiyun  **/
static int
lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;		/* user payload length */
	uint32_t full_size;	/* payload + ELX loopback header */
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	IOCB_t *cmd, *rsp = NULL;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;	/* buffer pointer list (BPL) page */
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf  *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Reject requests smaller than a diag_mode_test command */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	/* Loopback echoes the request, so both payloads must be equal size */
	if (job->request_payload.payload_len !=
		job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	/* No diag I/O while the HBA is errored, blocked, or SLI inactive */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	/* The application must have put the port into loopback beforehand */
	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	/* Payload must be non-empty and within the 80 * 4K transfer bound */
	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* Stage the user payload after space reserved for the CT header */
	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
				job->request_payload.sg_cnt,
				ptr, size);
	/* Self-login to obtain an rpi for the loopback exchange */
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		/* SLI-3: get explicit tx/rx exchange ids and post rx buffers */
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	/* Register to catch the looped-back unsolicited CT event */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Allocate iocbs (response iocb only needed pre-SLI4) and BPL page */
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
							txbpl, full_size, 0);
		}
	}

	/* Any allocation failure above unwinds through the error label */
	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	if (phba->sli_rev < LPFC_SLI_REV4)
		rsp = &rspiocbq->iocb;

	/*
	 * Scatter header + payload across the dma buffer segments; the
	 * first segment carries the ELX loopback CT header.
	 */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size   = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		cmd->ulpContext = txxri;
	} else {
		/* SLI-4 addresses the exchange by rpi; xri chosen by HW */
		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
		cmdiocbq->context3 = txbmp;
		cmdiocbq->sli4_xritag = NO_XRI;
		cmd->unsli3.rcvsli3.ox_id = 0xffff;
	}
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;
	/* Synchronously issue the sequence and wait for completion */
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) ||
	    ((phba->sli_rev < LPFC_SLI_REV4) &&
	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	/* Wait for the looped-back frame to arrive as an unsolicited event */
	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		/* time_left != 0 means we were interrupted, not timed out */
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			/*
			 * NOTE(review): the sg_copy_from_buffer() return value
			 * is immediately overwritten with 'size' below —
			 * presumably intentional since the lengths were
			 * validated equal, but confirm against upstream.
			 */
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			bsg_reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	/* Drop both event references: the waiter's and the creation ref */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* On IOCB_TIMEDOUT the iocb still belongs to the firmware */
	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
3394*4882a593Smuzhiyun 
3395*4882a593Smuzhiyun /**
3396*4882a593Smuzhiyun  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3397*4882a593Smuzhiyun  * @job: GET_DFC_REV fc_bsg_job
3398*4882a593Smuzhiyun  **/
3399*4882a593Smuzhiyun static int
lpfc_bsg_get_dfc_rev(struct bsg_job * job)3400*4882a593Smuzhiyun lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3401*4882a593Smuzhiyun {
3402*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3403*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
3404*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
3405*4882a593Smuzhiyun 	struct get_mgmt_rev_reply *event_reply;
3406*4882a593Smuzhiyun 	int rc = 0;
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun 	if (job->request_len <
3409*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3410*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3411*4882a593Smuzhiyun 				"2740 Received GET_DFC_REV request below "
3412*4882a593Smuzhiyun 				"minimum size\n");
3413*4882a593Smuzhiyun 		rc = -EINVAL;
3414*4882a593Smuzhiyun 		goto job_error;
3415*4882a593Smuzhiyun 	}
3416*4882a593Smuzhiyun 
3417*4882a593Smuzhiyun 	event_reply = (struct get_mgmt_rev_reply *)
3418*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3421*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3422*4882a593Smuzhiyun 				"2741 Received GET_DFC_REV reply below "
3423*4882a593Smuzhiyun 				"minimum size\n");
3424*4882a593Smuzhiyun 		rc = -EINVAL;
3425*4882a593Smuzhiyun 		goto job_error;
3426*4882a593Smuzhiyun 	}
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3429*4882a593Smuzhiyun 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3430*4882a593Smuzhiyun job_error:
3431*4882a593Smuzhiyun 	bsg_reply->result = rc;
3432*4882a593Smuzhiyun 	if (rc == 0)
3433*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
3434*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
3435*4882a593Smuzhiyun 	return rc;
3436*4882a593Smuzhiyun }
3437*4882a593Smuzhiyun 
3438*4882a593Smuzhiyun /**
3439*4882a593Smuzhiyun  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3440*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3441*4882a593Smuzhiyun  * @pmboxq: Pointer to mailbox command.
3442*4882a593Smuzhiyun  *
3443*4882a593Smuzhiyun  * This is completion handler function for mailbox commands issued from
3444*4882a593Smuzhiyun  * lpfc_bsg_issue_mbox function. This function is called by the
3445*4882a593Smuzhiyun  * mailbox event handler function with no lock held. This function
3446*4882a593Smuzhiyun  * will wake up thread waiting on the wait queue pointed by context1
3447*4882a593Smuzhiyun  * of the mailbox.
3448*4882a593Smuzhiyun  **/
static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	/* BSG job context was stashed in the mailbox ndlp context field */
	dd_data = pmboxq->ctx_ndlp;

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if job has been aborted */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */

	if (job) {
		bsg_reply = job->reply;
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	/* Release all driver-side resources before completing the job */
	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun /**
3506*4882a593Smuzhiyun  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3507*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3508*4882a593Smuzhiyun  * @mb: Pointer to a mailbox object.
3509*4882a593Smuzhiyun  * @vport: Pointer to a vport object.
3510*4882a593Smuzhiyun  *
3511*4882a593Smuzhiyun  * Some commands require the port to be offline, some may not be called from
3512*4882a593Smuzhiyun  * the application.
3513*4882a593Smuzhiyun  **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		/* fall through - port is offline, command is permitted */
	/* Allowed in any state */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		/* Track MLO maintenance / loopback-reset side effects */
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			/* Leaving loopback: restore point-to-point topology */
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	/* Not permitted from the application */
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun /**
3598*4882a593Smuzhiyun  * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3599*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3600*4882a593Smuzhiyun  *
 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3602*4882a593Smuzhiyun  * command session.
3603*4882a593Smuzhiyun  **/
3604*4882a593Smuzhiyun static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba * phba)3605*4882a593Smuzhiyun lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3606*4882a593Smuzhiyun {
3607*4882a593Smuzhiyun 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3608*4882a593Smuzhiyun 		return;
3609*4882a593Smuzhiyun 
3610*4882a593Smuzhiyun 	/* free all memory, including dma buffers */
3611*4882a593Smuzhiyun 	lpfc_bsg_dma_page_list_free(phba,
3612*4882a593Smuzhiyun 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3613*4882a593Smuzhiyun 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3614*4882a593Smuzhiyun 	/* multi-buffer write mailbox command pass-through complete */
3615*4882a593Smuzhiyun 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3616*4882a593Smuzhiyun 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3617*4882a593Smuzhiyun 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3618*4882a593Smuzhiyun 
3619*4882a593Smuzhiyun 	return;
3620*4882a593Smuzhiyun }
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun /**
3623*4882a593Smuzhiyun  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3624*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3625*4882a593Smuzhiyun  * @pmboxq: Pointer to mailbox command.
3626*4882a593Smuzhiyun  *
 * This routine handles BSG jobs for mailbox command completions with
3628*4882a593Smuzhiyun  * multiple external buffers.
3629*4882a593Smuzhiyun  **/
static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	/* BSG job context was stashed in the mailbox buffer context field */
	dd_data = pmboxq->ctx_buf;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */

	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		bsg_reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		/* NOTE(review): rc is always 0 here; the logged value never
		 * reflects an actual failure code - confirm against upstream.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}


	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun /**
3715*4882a593Smuzhiyun  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3716*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3717*4882a593Smuzhiyun  * @pmboxq: Pointer to mailbox command.
3718*4882a593Smuzhiyun  *
3719*4882a593Smuzhiyun  * This is completion handler function for mailbox read commands with multiple
3720*4882a593Smuzhiyun  * external buffers.
3721*4882a593Smuzhiyun  **/
3722*4882a593Smuzhiyun static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3723*4882a593Smuzhiyun lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3724*4882a593Smuzhiyun {
3725*4882a593Smuzhiyun 	struct bsg_job *job;
3726*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply;
3727*4882a593Smuzhiyun 
3728*4882a593Smuzhiyun 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun 	/* handle the BSG job with mailbox command */
3731*4882a593Smuzhiyun 	if (!job)
3732*4882a593Smuzhiyun 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3735*4882a593Smuzhiyun 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3736*4882a593Smuzhiyun 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3737*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3738*4882a593Smuzhiyun 
3739*4882a593Smuzhiyun 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3740*4882a593Smuzhiyun 		lpfc_bsg_mbox_ext_session_reset(phba);
3741*4882a593Smuzhiyun 
3742*4882a593Smuzhiyun 	/* free base driver mailbox structure memory */
3743*4882a593Smuzhiyun 	mempool_free(pmboxq, phba->mbox_mem_pool);
3744*4882a593Smuzhiyun 
3745*4882a593Smuzhiyun 	/* if the job is still active, call job done */
3746*4882a593Smuzhiyun 	if (job) {
3747*4882a593Smuzhiyun 		bsg_reply = job->reply;
3748*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
3749*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
3750*4882a593Smuzhiyun 	}
3751*4882a593Smuzhiyun 	return;
3752*4882a593Smuzhiyun }
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun /**
3755*4882a593Smuzhiyun  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3756*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3757*4882a593Smuzhiyun  * @pmboxq: Pointer to mailbox command.
3758*4882a593Smuzhiyun  *
3759*4882a593Smuzhiyun  * This is completion handler function for mailbox write commands with multiple
3760*4882a593Smuzhiyun  * external buffers.
3761*4882a593Smuzhiyun  **/
3762*4882a593Smuzhiyun static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3763*4882a593Smuzhiyun lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3764*4882a593Smuzhiyun {
3765*4882a593Smuzhiyun 	struct bsg_job *job;
3766*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply;
3767*4882a593Smuzhiyun 
3768*4882a593Smuzhiyun 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3769*4882a593Smuzhiyun 
3770*4882a593Smuzhiyun 	/* handle the BSG job with the mailbox command */
3771*4882a593Smuzhiyun 	if (!job)
3772*4882a593Smuzhiyun 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3775*4882a593Smuzhiyun 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3776*4882a593Smuzhiyun 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3777*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3778*4882a593Smuzhiyun 
3779*4882a593Smuzhiyun 	/* free all memory, including dma buffers */
3780*4882a593Smuzhiyun 	mempool_free(pmboxq, phba->mbox_mem_pool);
3781*4882a593Smuzhiyun 	lpfc_bsg_mbox_ext_session_reset(phba);
3782*4882a593Smuzhiyun 
3783*4882a593Smuzhiyun 	/* if the job is still active, call job done */
3784*4882a593Smuzhiyun 	if (job) {
3785*4882a593Smuzhiyun 		bsg_reply = job->reply;
3786*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
3787*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
3788*4882a593Smuzhiyun 	}
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun 	return;
3791*4882a593Smuzhiyun }
3792*4882a593Smuzhiyun 
/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up one external buffer dma descriptor
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type (mse or hbd).
 * @index: Index of the external buffer descriptor to fill in.
 * @mbx_dmabuf: Pointer to the DMA buffer holding the mailbox command itself.
 * @ext_dmabuf: Pointer to the DMA buffer backing this descriptor; only used
 *              when @index is non-zero.
 *
 * Writes the host physical address of one external buffer into the
 * SLI_CONFIG (0x9B) non-embedded mailbox command residing in @mbx_dmabuf.
 * Descriptor 0 always points just past the MAILBOX_t header inside the
 * mailbox DMA buffer itself; every other descriptor points at the separate
 * @ext_dmabuf page. The buffer length fields are not modified here - only
 * the pa_hi/pa_lo address words are written (buf_len is logged as-is).
 **/
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
				struct lpfc_dmabuf *ext_dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		/* non-embedded mse: descriptors in the emb0 subsystem area */
		if (index == 0) {
			/* first buffer is the tail of the mailbox page */
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2943 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		} else {
			/* subsequent buffers live in their own dma pages */
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		/* nemb_hbd: descriptors in the emb1 subsystem area */
		if (index == 0) {
			/* first buffer is the tail of the mailbox page */
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
				index,
				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.
				sli_config_emb1_subsys.hbd[index]),
				sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi,
				sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo);

		} else {
			/* subsequent buffers live in their own dma pages */
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
				index,
				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.
				sli_config_emb1_subsys.hbd[index]),
				sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi,
				sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo);
		}
	}
	return;
}
3885*4882a593Smuzhiyun 
/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	/* application's mailbox request rides in the vendor command area */
	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	/* extract and range-check the external buffer count by nemb type */
	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject non-embedded mailbox command with none external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers; buffer 0 is the tail of
		 * the mailbox page itself, so allocate cnt-1 extra pages and
		 * queue them on the pass-through context list
		 */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command by copying the user-supplied
	 * mailbox image from the dma buffer
	 */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->ctx_buf = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
		(nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
				sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[0].buf_len);

	/* issue asynchronously; completion runs the read cmpl handler */
	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	/* unwind everything allocated above; the extra dma pages hang off
	 * the context list, so free them through the list helper
	 */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}
4074*4882a593Smuzhiyun 
/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	/* application's mailbox request rides in the vendor command area */
	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	/* extract and range-check the external buffer count by nemb type */
	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log for looking forward */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* single external buffer: everything needed is already in hand, so
	 * issue the mailbox command to the port now
	 */
	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->ctx_buf = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */

		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* multiple buffers: complete this job now and wait for the
	 * additional external buffers to arrive via subsequent requests
	 */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}
4240*4882a593Smuzhiyun 
/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
 **/
4251*4882a593Smuzhiyun static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4252*4882a593Smuzhiyun lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4253*4882a593Smuzhiyun 			     struct lpfc_dmabuf *dmabuf)
4254*4882a593Smuzhiyun {
4255*4882a593Smuzhiyun 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4256*4882a593Smuzhiyun 	uint32_t subsys;
4257*4882a593Smuzhiyun 	uint32_t opcode;
4258*4882a593Smuzhiyun 	int rc = SLI_CONFIG_NOT_HANDLED;
4259*4882a593Smuzhiyun 
4260*4882a593Smuzhiyun 	/* state change on new multi-buffer pass-through mailbox command */
4261*4882a593Smuzhiyun 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4262*4882a593Smuzhiyun 
4263*4882a593Smuzhiyun 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4264*4882a593Smuzhiyun 
4265*4882a593Smuzhiyun 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4266*4882a593Smuzhiyun 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4267*4882a593Smuzhiyun 		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4268*4882a593Smuzhiyun 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4269*4882a593Smuzhiyun 		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4270*4882a593Smuzhiyun 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4271*4882a593Smuzhiyun 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4272*4882a593Smuzhiyun 			switch (opcode) {
4273*4882a593Smuzhiyun 			case FCOE_OPCODE_READ_FCF:
4274*4882a593Smuzhiyun 			case FCOE_OPCODE_GET_DPORT_RESULTS:
4275*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4276*4882a593Smuzhiyun 						"2957 Handled SLI_CONFIG "
4277*4882a593Smuzhiyun 						"subsys_fcoe, opcode:x%x\n",
4278*4882a593Smuzhiyun 						opcode);
4279*4882a593Smuzhiyun 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4280*4882a593Smuzhiyun 							nemb_mse, dmabuf);
4281*4882a593Smuzhiyun 				break;
4282*4882a593Smuzhiyun 			case FCOE_OPCODE_ADD_FCF:
4283*4882a593Smuzhiyun 			case FCOE_OPCODE_SET_DPORT_MODE:
4284*4882a593Smuzhiyun 			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4285*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4286*4882a593Smuzhiyun 						"2958 Handled SLI_CONFIG "
4287*4882a593Smuzhiyun 						"subsys_fcoe, opcode:x%x\n",
4288*4882a593Smuzhiyun 						opcode);
4289*4882a593Smuzhiyun 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4290*4882a593Smuzhiyun 							nemb_mse, dmabuf);
4291*4882a593Smuzhiyun 				break;
4292*4882a593Smuzhiyun 			default:
4293*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4294*4882a593Smuzhiyun 						"2959 Reject SLI_CONFIG "
4295*4882a593Smuzhiyun 						"subsys_fcoe, opcode:x%x\n",
4296*4882a593Smuzhiyun 						opcode);
4297*4882a593Smuzhiyun 				rc = -EPERM;
4298*4882a593Smuzhiyun 				break;
4299*4882a593Smuzhiyun 			}
4300*4882a593Smuzhiyun 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4301*4882a593Smuzhiyun 			switch (opcode) {
4302*4882a593Smuzhiyun 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4303*4882a593Smuzhiyun 			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4304*4882a593Smuzhiyun 			case COMN_OPCODE_GET_PROFILE_CONFIG:
4305*4882a593Smuzhiyun 			case COMN_OPCODE_SET_FEATURES:
4306*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4307*4882a593Smuzhiyun 						"3106 Handled SLI_CONFIG "
4308*4882a593Smuzhiyun 						"subsys_comn, opcode:x%x\n",
4309*4882a593Smuzhiyun 						opcode);
4310*4882a593Smuzhiyun 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4311*4882a593Smuzhiyun 							nemb_mse, dmabuf);
4312*4882a593Smuzhiyun 				break;
4313*4882a593Smuzhiyun 			default:
4314*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4315*4882a593Smuzhiyun 						"3107 Reject SLI_CONFIG "
4316*4882a593Smuzhiyun 						"subsys_comn, opcode:x%x\n",
4317*4882a593Smuzhiyun 						opcode);
4318*4882a593Smuzhiyun 				rc = -EPERM;
4319*4882a593Smuzhiyun 				break;
4320*4882a593Smuzhiyun 			}
4321*4882a593Smuzhiyun 		} else {
4322*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4323*4882a593Smuzhiyun 					"2977 Reject SLI_CONFIG "
4324*4882a593Smuzhiyun 					"subsys:x%d, opcode:x%x\n",
4325*4882a593Smuzhiyun 					subsys, opcode);
4326*4882a593Smuzhiyun 			rc = -EPERM;
4327*4882a593Smuzhiyun 		}
4328*4882a593Smuzhiyun 	} else {
4329*4882a593Smuzhiyun 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4330*4882a593Smuzhiyun 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4331*4882a593Smuzhiyun 		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4332*4882a593Smuzhiyun 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4333*4882a593Smuzhiyun 		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4334*4882a593Smuzhiyun 			switch (opcode) {
4335*4882a593Smuzhiyun 			case COMN_OPCODE_READ_OBJECT:
4336*4882a593Smuzhiyun 			case COMN_OPCODE_READ_OBJECT_LIST:
4337*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4338*4882a593Smuzhiyun 						"2960 Handled SLI_CONFIG "
4339*4882a593Smuzhiyun 						"subsys_comn, opcode:x%x\n",
4340*4882a593Smuzhiyun 						opcode);
4341*4882a593Smuzhiyun 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4342*4882a593Smuzhiyun 							nemb_hbd, dmabuf);
4343*4882a593Smuzhiyun 				break;
4344*4882a593Smuzhiyun 			case COMN_OPCODE_WRITE_OBJECT:
4345*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4346*4882a593Smuzhiyun 						"2961 Handled SLI_CONFIG "
4347*4882a593Smuzhiyun 						"subsys_comn, opcode:x%x\n",
4348*4882a593Smuzhiyun 						opcode);
4349*4882a593Smuzhiyun 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4350*4882a593Smuzhiyun 							nemb_hbd, dmabuf);
4351*4882a593Smuzhiyun 				break;
4352*4882a593Smuzhiyun 			default:
4353*4882a593Smuzhiyun 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4354*4882a593Smuzhiyun 						"2962 Not handled SLI_CONFIG "
4355*4882a593Smuzhiyun 						"subsys_comn, opcode:x%x\n",
4356*4882a593Smuzhiyun 						opcode);
4357*4882a593Smuzhiyun 				rc = SLI_CONFIG_NOT_HANDLED;
4358*4882a593Smuzhiyun 				break;
4359*4882a593Smuzhiyun 			}
4360*4882a593Smuzhiyun 		} else {
4361*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4362*4882a593Smuzhiyun 					"2978 Not handled SLI_CONFIG "
4363*4882a593Smuzhiyun 					"subsys:x%d, opcode:x%x\n",
4364*4882a593Smuzhiyun 					subsys, opcode);
4365*4882a593Smuzhiyun 			rc = SLI_CONFIG_NOT_HANDLED;
4366*4882a593Smuzhiyun 		}
4367*4882a593Smuzhiyun 	}
4368*4882a593Smuzhiyun 
4369*4882a593Smuzhiyun 	/* state reset on not handled new multi-buffer mailbox command */
4370*4882a593Smuzhiyun 	if (rc != SLI_CONFIG_HANDLED)
4371*4882a593Smuzhiyun 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 	return rc;
4374*4882a593Smuzhiyun }
4375*4882a593Smuzhiyun 
4376*4882a593Smuzhiyun /**
4377*4882a593Smuzhiyun  * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
4378*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
4379*4882a593Smuzhiyun  *
4380*4882a593Smuzhiyun  * This routine is for requesting to abort a pass-through mailbox command with
4381*4882a593Smuzhiyun  * multiple external buffers due to error condition.
4382*4882a593Smuzhiyun  **/
4383*4882a593Smuzhiyun static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba * phba)4384*4882a593Smuzhiyun lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4385*4882a593Smuzhiyun {
4386*4882a593Smuzhiyun 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4387*4882a593Smuzhiyun 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4388*4882a593Smuzhiyun 	else
4389*4882a593Smuzhiyun 		lpfc_bsg_mbox_ext_session_reset(phba);
4390*4882a593Smuzhiyun 	return;
4391*4882a593Smuzhiyun }
4392*4882a593Smuzhiyun 
/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the user-space reply payload.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG. Returns SLI_CONFIG_HANDLED on success, or
 * -EPIPE if the driver's external buffer list is unexpectedly empty.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	/* consume the next sequence slot of the multi-buffer session */
	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	/* buffer length comes from the SLI_CONFIG descriptor; the field
	 * layout differs between the mse (emb0) and hbd (emb1) forms
	 */
	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	/* broken pipe: caller asked for more buffers than were staged */
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	/* hand the buffer contents back to user space via the BSG sg list */
	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	/* last buffer of the session: tear the session context down */
	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}
4462*4882a593Smuzhiyun 
/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the user-space request payload.
 * @dmabuf: Pointer to a DMA buffer descriptor holding this external buffer.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG. Once all expected buffers have arrived,
 * the accumulated mailbox command is issued to the port. Returns
 * SLI_CONFIG_HANDLED on success or a negative errno on failure.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	/* consume the next sequence slot of the multi-buffer session */
	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	/* pull this external buffer's contents from user space */
	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	/* all external buffers received: issue the mailbox to the port */
	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);

		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		/* the accumulated command lives in the session's mbx buffer */
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->ctx_buf = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	/* NOTE(review): if lpfc_sli_issue_mbox failed, job->dd_data was
	 * already set to dd_data and is freed here without being cleared,
	 * and the session state remains LPFC_BSG_MBOX_PORT — confirm the
	 * caller recovers from both before relying on this path.
	 */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}
4587*4882a593Smuzhiyun 
4588*4882a593Smuzhiyun /**
4589*4882a593Smuzhiyun  * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4590*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
4591*4882a593Smuzhiyun  * @mb: Pointer to a BSG mailbox object.
4592*4882a593Smuzhiyun  * @dmabuff: Pointer to a DMA buffer descriptor.
4593*4882a593Smuzhiyun  *
4594*4882a593Smuzhiyun  * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4595*4882a593Smuzhiyun  * command with multiple non-embedded external buffers.
4596*4882a593Smuzhiyun  **/
4597*4882a593Smuzhiyun static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4598*4882a593Smuzhiyun lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4599*4882a593Smuzhiyun 			     struct lpfc_dmabuf *dmabuf)
4600*4882a593Smuzhiyun {
4601*4882a593Smuzhiyun 	int rc;
4602*4882a593Smuzhiyun 
4603*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4604*4882a593Smuzhiyun 			"2971 SLI_CONFIG buffer (type:x%x)\n",
4605*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.mboxType);
4606*4882a593Smuzhiyun 
4607*4882a593Smuzhiyun 	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4608*4882a593Smuzhiyun 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4609*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4610*4882a593Smuzhiyun 					"2972 SLI_CONFIG rd buffer state "
4611*4882a593Smuzhiyun 					"mismatch:x%x\n",
4612*4882a593Smuzhiyun 					phba->mbox_ext_buf_ctx.state);
4613*4882a593Smuzhiyun 			lpfc_bsg_mbox_ext_abort(phba);
4614*4882a593Smuzhiyun 			return -EPIPE;
4615*4882a593Smuzhiyun 		}
4616*4882a593Smuzhiyun 		rc = lpfc_bsg_read_ebuf_get(phba, job);
4617*4882a593Smuzhiyun 		if (rc == SLI_CONFIG_HANDLED)
4618*4882a593Smuzhiyun 			lpfc_bsg_dma_page_free(phba, dmabuf);
4619*4882a593Smuzhiyun 	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4620*4882a593Smuzhiyun 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4621*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4622*4882a593Smuzhiyun 					"2973 SLI_CONFIG wr buffer state "
4623*4882a593Smuzhiyun 					"mismatch:x%x\n",
4624*4882a593Smuzhiyun 					phba->mbox_ext_buf_ctx.state);
4625*4882a593Smuzhiyun 			lpfc_bsg_mbox_ext_abort(phba);
4626*4882a593Smuzhiyun 			return -EPIPE;
4627*4882a593Smuzhiyun 		}
4628*4882a593Smuzhiyun 		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4629*4882a593Smuzhiyun 	}
4630*4882a593Smuzhiyun 	return rc;
4631*4882a593Smuzhiyun }
4632*4882a593Smuzhiyun 
4633*4882a593Smuzhiyun /**
4634*4882a593Smuzhiyun  * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4635*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
4636*4882a593Smuzhiyun  * @mb: Pointer to a BSG mailbox object.
4637*4882a593Smuzhiyun  * @dmabuff: Pointer to a DMA buffer descriptor.
4638*4882a593Smuzhiyun  *
4639*4882a593Smuzhiyun  * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
4640*4882a593Smuzhiyun  * (0x9B) mailbox commands and external buffers.
4641*4882a593Smuzhiyun  **/
4642*4882a593Smuzhiyun static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4643*4882a593Smuzhiyun lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4644*4882a593Smuzhiyun 			    struct lpfc_dmabuf *dmabuf)
4645*4882a593Smuzhiyun {
4646*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
4647*4882a593Smuzhiyun 	struct dfc_mbox_req *mbox_req;
4648*4882a593Smuzhiyun 	int rc = SLI_CONFIG_NOT_HANDLED;
4649*4882a593Smuzhiyun 
4650*4882a593Smuzhiyun 	mbox_req =
4651*4882a593Smuzhiyun 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4652*4882a593Smuzhiyun 
4653*4882a593Smuzhiyun 	/* mbox command with/without single external buffer */
4654*4882a593Smuzhiyun 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4655*4882a593Smuzhiyun 		return rc;
4656*4882a593Smuzhiyun 
4657*4882a593Smuzhiyun 	/* mbox command and first external buffer */
4658*4882a593Smuzhiyun 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4659*4882a593Smuzhiyun 		if (mbox_req->extSeqNum == 1) {
4660*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4661*4882a593Smuzhiyun 					"2974 SLI_CONFIG mailbox: tag:%d, "
4662*4882a593Smuzhiyun 					"seq:%d\n", mbox_req->extMboxTag,
4663*4882a593Smuzhiyun 					mbox_req->extSeqNum);
4664*4882a593Smuzhiyun 			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4665*4882a593Smuzhiyun 			return rc;
4666*4882a593Smuzhiyun 		} else
4667*4882a593Smuzhiyun 			goto sli_cfg_ext_error;
4668*4882a593Smuzhiyun 	}
4669*4882a593Smuzhiyun 
4670*4882a593Smuzhiyun 	/*
4671*4882a593Smuzhiyun 	 * handle additional external buffers
4672*4882a593Smuzhiyun 	 */
4673*4882a593Smuzhiyun 
4674*4882a593Smuzhiyun 	/* check broken pipe conditions */
4675*4882a593Smuzhiyun 	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4676*4882a593Smuzhiyun 		goto sli_cfg_ext_error;
4677*4882a593Smuzhiyun 	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4678*4882a593Smuzhiyun 		goto sli_cfg_ext_error;
4679*4882a593Smuzhiyun 	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4680*4882a593Smuzhiyun 		goto sli_cfg_ext_error;
4681*4882a593Smuzhiyun 
4682*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4683*4882a593Smuzhiyun 			"2975 SLI_CONFIG mailbox external buffer: "
4684*4882a593Smuzhiyun 			"extSta:x%x, tag:%d, seq:%d\n",
4685*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4686*4882a593Smuzhiyun 			mbox_req->extSeqNum);
4687*4882a593Smuzhiyun 	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4688*4882a593Smuzhiyun 	return rc;
4689*4882a593Smuzhiyun 
4690*4882a593Smuzhiyun sli_cfg_ext_error:
4691*4882a593Smuzhiyun 	/* all other cases, broken pipe */
4692*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4693*4882a593Smuzhiyun 			"2976 SLI_CONFIG mailbox broken pipe: "
4694*4882a593Smuzhiyun 			"ctxSta:x%x, ctxNumBuf:%d "
4695*4882a593Smuzhiyun 			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4696*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.state,
4697*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.numBuf,
4698*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.mbxTag,
4699*4882a593Smuzhiyun 			phba->mbox_ext_buf_ctx.seqNum,
4700*4882a593Smuzhiyun 			mbox_req->extMboxTag, mbox_req->extSeqNum);
4701*4882a593Smuzhiyun 
4702*4882a593Smuzhiyun 	lpfc_bsg_mbox_ext_session_reset(phba);
4703*4882a593Smuzhiyun 
4704*4882a593Smuzhiyun 	return -EPIPE;
4705*4882a593Smuzhiyun }
4706*4882a593Smuzhiyun 
4707*4882a593Smuzhiyun /**
4708*4882a593Smuzhiyun  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4709*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job for this mailbox request.
4711*4882a593Smuzhiyun  * @vport: Pointer to a vport object.
4712*4882a593Smuzhiyun  *
4713*4882a593Smuzhiyun  * Allocate a tracking object, mailbox command memory, get a mailbox
4714*4882a593Smuzhiyun  * from the mailbox pool, copy the caller mailbox command.
4715*4882a593Smuzhiyun  *
4716*4882a593Smuzhiyun  * If offline and the sli is active we need to poll for the command (port is
4717*4882a593Smuzhiyun  * being reset) and com-plete the job, otherwise issue the mailbox command and
4718*4882a593Smuzhiyun  * let our completion handler finish the command.
4719*4882a593Smuzhiyun  **/
4720*4882a593Smuzhiyun static int
lpfc_bsg_issue_mbox(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_vport * vport)4721*4882a593Smuzhiyun lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4722*4882a593Smuzhiyun 	struct lpfc_vport *vport)
4723*4882a593Smuzhiyun {
4724*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
4725*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
4726*4882a593Smuzhiyun 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4727*4882a593Smuzhiyun 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4728*4882a593Smuzhiyun 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4729*4882a593Smuzhiyun 	uint8_t *pmbx = NULL;
4730*4882a593Smuzhiyun 	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4731*4882a593Smuzhiyun 	struct lpfc_dmabuf *dmabuf = NULL;
4732*4882a593Smuzhiyun 	struct dfc_mbox_req *mbox_req;
4733*4882a593Smuzhiyun 	struct READ_EVENT_LOG_VAR *rdEventLog;
4734*4882a593Smuzhiyun 	uint32_t transmit_length, receive_length, mode;
4735*4882a593Smuzhiyun 	struct lpfc_mbx_sli4_config *sli4_config;
4736*4882a593Smuzhiyun 	struct lpfc_mbx_nembed_cmd *nembed_sge;
4737*4882a593Smuzhiyun 	struct ulp_bde64 *bde;
4738*4882a593Smuzhiyun 	uint8_t *ext = NULL;
4739*4882a593Smuzhiyun 	int rc = 0;
4740*4882a593Smuzhiyun 	uint8_t *from;
4741*4882a593Smuzhiyun 	uint32_t size;
4742*4882a593Smuzhiyun 
4743*4882a593Smuzhiyun 	/* in case no data is transferred */
4744*4882a593Smuzhiyun 	bsg_reply->reply_payload_rcv_len = 0;
4745*4882a593Smuzhiyun 
4746*4882a593Smuzhiyun 	/* sanity check to protect driver */
4747*4882a593Smuzhiyun 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4748*4882a593Smuzhiyun 	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4749*4882a593Smuzhiyun 		rc = -ERANGE;
4750*4882a593Smuzhiyun 		goto job_done;
4751*4882a593Smuzhiyun 	}
4752*4882a593Smuzhiyun 
4753*4882a593Smuzhiyun 	/*
4754*4882a593Smuzhiyun 	 * Don't allow mailbox commands to be sent when blocked or when in
4755*4882a593Smuzhiyun 	 * the middle of discovery
4756*4882a593Smuzhiyun 	 */
4757*4882a593Smuzhiyun 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4758*4882a593Smuzhiyun 		rc = -EAGAIN;
4759*4882a593Smuzhiyun 		goto job_done;
4760*4882a593Smuzhiyun 	}
4761*4882a593Smuzhiyun 
4762*4882a593Smuzhiyun 	mbox_req =
4763*4882a593Smuzhiyun 	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4764*4882a593Smuzhiyun 
4765*4882a593Smuzhiyun 	/* check if requested extended data lengths are valid */
4766*4882a593Smuzhiyun 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4767*4882a593Smuzhiyun 	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4768*4882a593Smuzhiyun 		rc = -ERANGE;
4769*4882a593Smuzhiyun 		goto job_done;
4770*4882a593Smuzhiyun 	}
4771*4882a593Smuzhiyun 
4772*4882a593Smuzhiyun 	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4773*4882a593Smuzhiyun 	if (!dmabuf || !dmabuf->virt) {
4774*4882a593Smuzhiyun 		rc = -ENOMEM;
4775*4882a593Smuzhiyun 		goto job_done;
4776*4882a593Smuzhiyun 	}
4777*4882a593Smuzhiyun 
4778*4882a593Smuzhiyun 	/* Get the mailbox command or external buffer from BSG */
4779*4882a593Smuzhiyun 	pmbx = (uint8_t *)dmabuf->virt;
4780*4882a593Smuzhiyun 	size = job->request_payload.payload_len;
4781*4882a593Smuzhiyun 	sg_copy_to_buffer(job->request_payload.sg_list,
4782*4882a593Smuzhiyun 			  job->request_payload.sg_cnt, pmbx, size);
4783*4882a593Smuzhiyun 
4784*4882a593Smuzhiyun 	/* Handle possible SLI_CONFIG with non-embedded payloads */
4785*4882a593Smuzhiyun 	if (phba->sli_rev == LPFC_SLI_REV4) {
4786*4882a593Smuzhiyun 		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4787*4882a593Smuzhiyun 		if (rc == SLI_CONFIG_HANDLED)
4788*4882a593Smuzhiyun 			goto job_cont;
4789*4882a593Smuzhiyun 		if (rc)
4790*4882a593Smuzhiyun 			goto job_done;
4791*4882a593Smuzhiyun 		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4792*4882a593Smuzhiyun 	}
4793*4882a593Smuzhiyun 
4794*4882a593Smuzhiyun 	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4795*4882a593Smuzhiyun 	if (rc != 0)
4796*4882a593Smuzhiyun 		goto job_done; /* must be negative */
4797*4882a593Smuzhiyun 
4798*4882a593Smuzhiyun 	/* allocate our bsg tracking structure */
4799*4882a593Smuzhiyun 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4800*4882a593Smuzhiyun 	if (!dd_data) {
4801*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4802*4882a593Smuzhiyun 				"2727 Failed allocation of dd_data\n");
4803*4882a593Smuzhiyun 		rc = -ENOMEM;
4804*4882a593Smuzhiyun 		goto job_done;
4805*4882a593Smuzhiyun 	}
4806*4882a593Smuzhiyun 
4807*4882a593Smuzhiyun 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4808*4882a593Smuzhiyun 	if (!pmboxq) {
4809*4882a593Smuzhiyun 		rc = -ENOMEM;
4810*4882a593Smuzhiyun 		goto job_done;
4811*4882a593Smuzhiyun 	}
4812*4882a593Smuzhiyun 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4813*4882a593Smuzhiyun 
4814*4882a593Smuzhiyun 	pmb = &pmboxq->u.mb;
4815*4882a593Smuzhiyun 	memcpy(pmb, pmbx, sizeof(*pmb));
4816*4882a593Smuzhiyun 	pmb->mbxOwner = OWN_HOST;
4817*4882a593Smuzhiyun 	pmboxq->vport = vport;
4818*4882a593Smuzhiyun 
4819*4882a593Smuzhiyun 	/* If HBA encountered an error attention, allow only DUMP
4820*4882a593Smuzhiyun 	 * or RESTART mailbox commands until the HBA is restarted.
4821*4882a593Smuzhiyun 	 */
4822*4882a593Smuzhiyun 	if (phba->pport->stopped &&
4823*4882a593Smuzhiyun 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4824*4882a593Smuzhiyun 	    pmb->mbxCommand != MBX_RESTART &&
4825*4882a593Smuzhiyun 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4826*4882a593Smuzhiyun 	    pmb->mbxCommand != MBX_WRITE_WWN)
4827*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4828*4882a593Smuzhiyun 				"2797 mbox: Issued mailbox cmd "
4829*4882a593Smuzhiyun 				"0x%x while in stopped state.\n",
4830*4882a593Smuzhiyun 				pmb->mbxCommand);
4831*4882a593Smuzhiyun 
4832*4882a593Smuzhiyun 	/* extended mailbox commands will need an extended buffer */
4833*4882a593Smuzhiyun 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4834*4882a593Smuzhiyun 		from = pmbx;
4835*4882a593Smuzhiyun 		ext = from + sizeof(MAILBOX_t);
4836*4882a593Smuzhiyun 		pmboxq->ctx_buf = ext;
4837*4882a593Smuzhiyun 		pmboxq->in_ext_byte_len =
4838*4882a593Smuzhiyun 			mbox_req->inExtWLen * sizeof(uint32_t);
4839*4882a593Smuzhiyun 		pmboxq->out_ext_byte_len =
4840*4882a593Smuzhiyun 			mbox_req->outExtWLen * sizeof(uint32_t);
4841*4882a593Smuzhiyun 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4842*4882a593Smuzhiyun 	}
4843*4882a593Smuzhiyun 
4844*4882a593Smuzhiyun 	/* biu diag will need a kernel buffer to transfer the data
4845*4882a593Smuzhiyun 	 * allocate our own buffer and setup the mailbox command to
4846*4882a593Smuzhiyun 	 * use ours
4847*4882a593Smuzhiyun 	 */
4848*4882a593Smuzhiyun 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4849*4882a593Smuzhiyun 		transmit_length = pmb->un.varWords[1];
4850*4882a593Smuzhiyun 		receive_length = pmb->un.varWords[4];
4851*4882a593Smuzhiyun 		/* transmit length cannot be greater than receive length or
4852*4882a593Smuzhiyun 		 * mailbox extension size
4853*4882a593Smuzhiyun 		 */
4854*4882a593Smuzhiyun 		if ((transmit_length > receive_length) ||
4855*4882a593Smuzhiyun 			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4856*4882a593Smuzhiyun 			rc = -ERANGE;
4857*4882a593Smuzhiyun 			goto job_done;
4858*4882a593Smuzhiyun 		}
4859*4882a593Smuzhiyun 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4860*4882a593Smuzhiyun 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4861*4882a593Smuzhiyun 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4862*4882a593Smuzhiyun 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4863*4882a593Smuzhiyun 
4864*4882a593Smuzhiyun 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4865*4882a593Smuzhiyun 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4866*4882a593Smuzhiyun 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4867*4882a593Smuzhiyun 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4868*4882a593Smuzhiyun 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4869*4882a593Smuzhiyun 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4870*4882a593Smuzhiyun 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4871*4882a593Smuzhiyun 		rdEventLog = &pmb->un.varRdEventLog;
4872*4882a593Smuzhiyun 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4873*4882a593Smuzhiyun 		mode = bf_get(lpfc_event_log, rdEventLog);
4874*4882a593Smuzhiyun 
4875*4882a593Smuzhiyun 		/* receive length cannot be greater than mailbox
4876*4882a593Smuzhiyun 		 * extension size
4877*4882a593Smuzhiyun 		 */
4878*4882a593Smuzhiyun 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4879*4882a593Smuzhiyun 			rc = -ERANGE;
4880*4882a593Smuzhiyun 			goto job_done;
4881*4882a593Smuzhiyun 		}
4882*4882a593Smuzhiyun 
4883*4882a593Smuzhiyun 		/* mode zero uses a bde like biu diags command */
4884*4882a593Smuzhiyun 		if (mode == 0) {
4885*4882a593Smuzhiyun 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4886*4882a593Smuzhiyun 							+ sizeof(MAILBOX_t));
4887*4882a593Smuzhiyun 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4888*4882a593Smuzhiyun 							+ sizeof(MAILBOX_t));
4889*4882a593Smuzhiyun 		}
4890*4882a593Smuzhiyun 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4891*4882a593Smuzhiyun 		/* Let type 4 (well known data) through because the data is
4892*4882a593Smuzhiyun 		 * returned in varwords[4-8]
		 * otherwise check the receive length and fetch the buffer addr
4894*4882a593Smuzhiyun 		 */
4895*4882a593Smuzhiyun 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4896*4882a593Smuzhiyun 			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4897*4882a593Smuzhiyun 			/* rebuild the command for sli4 using our own buffers
4898*4882a593Smuzhiyun 			* like we do for biu diags
4899*4882a593Smuzhiyun 			*/
4900*4882a593Smuzhiyun 			receive_length = pmb->un.varWords[2];
4901*4882a593Smuzhiyun 			/* receive length cannot be greater than mailbox
4902*4882a593Smuzhiyun 			 * extension size
4903*4882a593Smuzhiyun 			 */
4904*4882a593Smuzhiyun 			if (receive_length == 0) {
4905*4882a593Smuzhiyun 				rc = -ERANGE;
4906*4882a593Smuzhiyun 				goto job_done;
4907*4882a593Smuzhiyun 			}
4908*4882a593Smuzhiyun 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4909*4882a593Smuzhiyun 						+ sizeof(MAILBOX_t));
4910*4882a593Smuzhiyun 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4911*4882a593Smuzhiyun 						+ sizeof(MAILBOX_t));
4912*4882a593Smuzhiyun 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4913*4882a593Smuzhiyun 			pmb->un.varUpdateCfg.co) {
4914*4882a593Smuzhiyun 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4915*4882a593Smuzhiyun 
4916*4882a593Smuzhiyun 			/* bde size cannot be greater than mailbox ext size */
4917*4882a593Smuzhiyun 			if (bde->tus.f.bdeSize >
4918*4882a593Smuzhiyun 			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4919*4882a593Smuzhiyun 				rc = -ERANGE;
4920*4882a593Smuzhiyun 				goto job_done;
4921*4882a593Smuzhiyun 			}
4922*4882a593Smuzhiyun 			bde->addrHigh = putPaddrHigh(dmabuf->phys
4923*4882a593Smuzhiyun 						+ sizeof(MAILBOX_t));
4924*4882a593Smuzhiyun 			bde->addrLow = putPaddrLow(dmabuf->phys
4925*4882a593Smuzhiyun 						+ sizeof(MAILBOX_t));
4926*4882a593Smuzhiyun 		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4927*4882a593Smuzhiyun 			/* Handling non-embedded SLI_CONFIG mailbox command */
4928*4882a593Smuzhiyun 			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4929*4882a593Smuzhiyun 			if (!bf_get(lpfc_mbox_hdr_emb,
4930*4882a593Smuzhiyun 			    &sli4_config->header.cfg_mhdr)) {
4931*4882a593Smuzhiyun 				/* rebuild the command for sli4 using our
4932*4882a593Smuzhiyun 				 * own buffers like we do for biu diags
4933*4882a593Smuzhiyun 				 */
4934*4882a593Smuzhiyun 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4935*4882a593Smuzhiyun 						&pmb->un.varWords[0];
4936*4882a593Smuzhiyun 				receive_length = nembed_sge->sge[0].length;
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 				/* receive length cannot be greater than
4939*4882a593Smuzhiyun 				 * mailbox extension size
4940*4882a593Smuzhiyun 				 */
4941*4882a593Smuzhiyun 				if ((receive_length == 0) ||
4942*4882a593Smuzhiyun 				    (receive_length >
4943*4882a593Smuzhiyun 				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4944*4882a593Smuzhiyun 					rc = -ERANGE;
4945*4882a593Smuzhiyun 					goto job_done;
4946*4882a593Smuzhiyun 				}
4947*4882a593Smuzhiyun 
4948*4882a593Smuzhiyun 				nembed_sge->sge[0].pa_hi =
4949*4882a593Smuzhiyun 						putPaddrHigh(dmabuf->phys
4950*4882a593Smuzhiyun 						   + sizeof(MAILBOX_t));
4951*4882a593Smuzhiyun 				nembed_sge->sge[0].pa_lo =
4952*4882a593Smuzhiyun 						putPaddrLow(dmabuf->phys
4953*4882a593Smuzhiyun 						   + sizeof(MAILBOX_t));
4954*4882a593Smuzhiyun 			}
4955*4882a593Smuzhiyun 		}
4956*4882a593Smuzhiyun 	}
4957*4882a593Smuzhiyun 
4958*4882a593Smuzhiyun 	dd_data->context_un.mbox.dmabuffers = dmabuf;
4959*4882a593Smuzhiyun 
4960*4882a593Smuzhiyun 	/* setup wake call as IOCB callback */
4961*4882a593Smuzhiyun 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4962*4882a593Smuzhiyun 
4963*4882a593Smuzhiyun 	/* setup context field to pass wait_queue pointer to wake function */
4964*4882a593Smuzhiyun 	pmboxq->ctx_ndlp = dd_data;
4965*4882a593Smuzhiyun 	dd_data->type = TYPE_MBOX;
4966*4882a593Smuzhiyun 	dd_data->set_job = job;
4967*4882a593Smuzhiyun 	dd_data->context_un.mbox.pmboxq = pmboxq;
4968*4882a593Smuzhiyun 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4969*4882a593Smuzhiyun 	dd_data->context_un.mbox.ext = ext;
4970*4882a593Smuzhiyun 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4971*4882a593Smuzhiyun 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4972*4882a593Smuzhiyun 	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4973*4882a593Smuzhiyun 	job->dd_data = dd_data;
4974*4882a593Smuzhiyun 
4975*4882a593Smuzhiyun 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4976*4882a593Smuzhiyun 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4977*4882a593Smuzhiyun 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4978*4882a593Smuzhiyun 		if (rc != MBX_SUCCESS) {
4979*4882a593Smuzhiyun 			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4980*4882a593Smuzhiyun 			goto job_done;
4981*4882a593Smuzhiyun 		}
4982*4882a593Smuzhiyun 
4983*4882a593Smuzhiyun 		/* job finished, copy the data */
4984*4882a593Smuzhiyun 		memcpy(pmbx, pmb, sizeof(*pmb));
4985*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len =
4986*4882a593Smuzhiyun 			sg_copy_from_buffer(job->reply_payload.sg_list,
4987*4882a593Smuzhiyun 					    job->reply_payload.sg_cnt,
4988*4882a593Smuzhiyun 					    pmbx, size);
4989*4882a593Smuzhiyun 		/* not waiting mbox already done */
4990*4882a593Smuzhiyun 		rc = 0;
4991*4882a593Smuzhiyun 		goto job_done;
4992*4882a593Smuzhiyun 	}
4993*4882a593Smuzhiyun 
4994*4882a593Smuzhiyun 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4995*4882a593Smuzhiyun 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4996*4882a593Smuzhiyun 		return 1; /* job started */
4997*4882a593Smuzhiyun 
4998*4882a593Smuzhiyun job_done:
4999*4882a593Smuzhiyun 	/* common exit for error or job completed inline */
5000*4882a593Smuzhiyun 	if (pmboxq)
5001*4882a593Smuzhiyun 		mempool_free(pmboxq, phba->mbox_mem_pool);
5002*4882a593Smuzhiyun 	lpfc_bsg_dma_page_free(phba, dmabuf);
5003*4882a593Smuzhiyun 	kfree(dd_data);
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun job_cont:
5006*4882a593Smuzhiyun 	return rc;
5007*4882a593Smuzhiyun }
5008*4882a593Smuzhiyun 
5009*4882a593Smuzhiyun /**
5010*4882a593Smuzhiyun  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
5011*4882a593Smuzhiyun  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
5012*4882a593Smuzhiyun  **/
5013*4882a593Smuzhiyun static int
lpfc_bsg_mbox_cmd(struct bsg_job * job)5014*4882a593Smuzhiyun lpfc_bsg_mbox_cmd(struct bsg_job *job)
5015*4882a593Smuzhiyun {
5016*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5017*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
5018*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5019*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5020*4882a593Smuzhiyun 	struct dfc_mbox_req *mbox_req;
5021*4882a593Smuzhiyun 	int rc = 0;
5022*4882a593Smuzhiyun 
5023*4882a593Smuzhiyun 	/* mix-and-match backward compatibility */
5024*4882a593Smuzhiyun 	bsg_reply->reply_payload_rcv_len = 0;
5025*4882a593Smuzhiyun 	if (job->request_len <
5026*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
5027*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
5028*4882a593Smuzhiyun 				"2737 Mix-and-match backward compatibility "
5029*4882a593Smuzhiyun 				"between MBOX_REQ old size:%d and "
5030*4882a593Smuzhiyun 				"new request size:%d\n",
5031*4882a593Smuzhiyun 				(int)(job->request_len -
5032*4882a593Smuzhiyun 				      sizeof(struct fc_bsg_request)),
5033*4882a593Smuzhiyun 				(int)sizeof(struct dfc_mbox_req));
5034*4882a593Smuzhiyun 		mbox_req = (struct dfc_mbox_req *)
5035*4882a593Smuzhiyun 				bsg_request->rqst_data.h_vendor.vendor_cmd;
5036*4882a593Smuzhiyun 		mbox_req->extMboxTag = 0;
5037*4882a593Smuzhiyun 		mbox_req->extSeqNum = 0;
5038*4882a593Smuzhiyun 	}
5039*4882a593Smuzhiyun 
5040*4882a593Smuzhiyun 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
5041*4882a593Smuzhiyun 
5042*4882a593Smuzhiyun 	if (rc == 0) {
5043*4882a593Smuzhiyun 		/* job done */
5044*4882a593Smuzhiyun 		bsg_reply->result = 0;
5045*4882a593Smuzhiyun 		job->dd_data = NULL;
5046*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5047*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
5048*4882a593Smuzhiyun 	} else if (rc == 1)
5049*4882a593Smuzhiyun 		/* job submitted, will complete later*/
5050*4882a593Smuzhiyun 		rc = 0; /* return zero, no error */
5051*4882a593Smuzhiyun 	else {
5052*4882a593Smuzhiyun 		/* some error occurred */
5053*4882a593Smuzhiyun 		bsg_reply->result = rc;
5054*4882a593Smuzhiyun 		job->dd_data = NULL;
5055*4882a593Smuzhiyun 	}
5056*4882a593Smuzhiyun 
5057*4882a593Smuzhiyun 	return rc;
5058*4882a593Smuzhiyun }
5059*4882a593Smuzhiyun 
5060*4882a593Smuzhiyun /**
5061*4882a593Smuzhiyun  * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
5062*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
5063*4882a593Smuzhiyun  * @cmdiocbq: Pointer to command iocb.
5064*4882a593Smuzhiyun  * @rspiocbq: Pointer to response iocb.
5065*4882a593Smuzhiyun  *
5066*4882a593Smuzhiyun  * This function is the completion handler for iocbs issued using
5067*4882a593Smuzhiyun  * lpfc_menlo_cmd function. This function is called by the
5068*4882a593Smuzhiyun  * ring event handler function without any lock held. This function
5069*4882a593Smuzhiyun  * can be called from both worker thread context and interrupt
5070*4882a593Smuzhiyun  * context. This function also can be called from another thread which
5071*4882a593Smuzhiyun  * cleans up the SLI layer objects.
5072*4882a593Smuzhiyun  * This function copies the contents of the response iocb to the
5073*4882a593Smuzhiyun  * response iocb memory object provided by the caller of
5074*4882a593Smuzhiyun  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
5075*4882a593Smuzhiyun  * sleeps for the iocb completion.
5076*4882a593Smuzhiyun  **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	/* Recover the tracking structure and DMA buffers that were stashed
	 * in the command iocb context fields when the command was issued.
	 */
	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
		 */

		menlo_resp = (struct menlo_response *)
			bsg_reply->reply_data.vendor_reply.vendor_rsp;
		menlo_resp->xri = rsp->ulpContext;
		if (rsp->ulpStatus) {
			/* Map the iocb completion status to an errno */
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			/* Success: copy received data back into the job's
			 * reply payload scatter/gather list.
			 */
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}

	}

	/* Release the iocb and all DMA resources whether or not the job
	 * is still active; nothing else references them after this point.
	 */
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	kfree(dd_data);

	/* Complete the job if active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return;
}
5163*4882a593Smuzhiyun 
5164*4882a593Smuzhiyun /**
5165*4882a593Smuzhiyun  * lpfc_menlo_cmd - send an ioctl for menlo hardware
5166*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5167*4882a593Smuzhiyun  *
5168*4882a593Smuzhiyun  * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5169*4882a593Smuzhiyun  * all the command completions will return the xri for the command.
5170*4882a593Smuzhiyun  * For menlo data requests a gen request 64 CX is used to continue the exchange
5171*4882a593Smuzhiyun  * supplied in the menlo request header xri field.
5172*4882a593Smuzhiyun  **/
static int
lpfc_menlo_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Validate that request and reply areas are large enough to hold
	 * the menlo command and response structures.
	 */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len < sizeof(*bsg_reply) +
				sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Buffer pointer list (BPL) describing the command and reply DMA
	 * buffers to the hardware.
	 */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	/* Copy the user's request payload into the command DMA buffers */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	/* Remaining BPL entries describe the reply buffers */
	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	/* Build a GEN_REQUEST64 iocb whose BDL points at the BPL above */
	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	/* Stash the resources for the completion handler to release */
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		/* New exchange: CR variant addressed to the menlo DID */
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		/* Continue an existing exchange identified by the caller's
		 * xri (CX variant).
		 */
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
		MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* Issue failed: fall through the cleanup chain, releasing every
	 * resource acquired above in reverse order.
	 */
	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
5336*4882a593Smuzhiyun 
5337*4882a593Smuzhiyun static int
lpfc_forced_link_speed(struct bsg_job * job)5338*4882a593Smuzhiyun lpfc_forced_link_speed(struct bsg_job *job)
5339*4882a593Smuzhiyun {
5340*4882a593Smuzhiyun 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5341*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(shost);
5342*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5343*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5344*4882a593Smuzhiyun 	struct forced_link_speed_support_reply *forced_reply;
5345*4882a593Smuzhiyun 	int rc = 0;
5346*4882a593Smuzhiyun 
5347*4882a593Smuzhiyun 	if (job->request_len <
5348*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) +
5349*4882a593Smuzhiyun 	    sizeof(struct get_forced_link_speed_support)) {
5350*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5351*4882a593Smuzhiyun 				"0048 Received FORCED_LINK_SPEED request "
5352*4882a593Smuzhiyun 				"below minimum size\n");
5353*4882a593Smuzhiyun 		rc = -EINVAL;
5354*4882a593Smuzhiyun 		goto job_error;
5355*4882a593Smuzhiyun 	}
5356*4882a593Smuzhiyun 
5357*4882a593Smuzhiyun 	forced_reply = (struct forced_link_speed_support_reply *)
5358*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5359*4882a593Smuzhiyun 
5360*4882a593Smuzhiyun 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5361*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5362*4882a593Smuzhiyun 				"0049 Received FORCED_LINK_SPEED reply below "
5363*4882a593Smuzhiyun 				"minimum size\n");
5364*4882a593Smuzhiyun 		rc = -EINVAL;
5365*4882a593Smuzhiyun 		goto job_error;
5366*4882a593Smuzhiyun 	}
5367*4882a593Smuzhiyun 
5368*4882a593Smuzhiyun 	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5369*4882a593Smuzhiyun 				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5370*4882a593Smuzhiyun 				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5371*4882a593Smuzhiyun job_error:
5372*4882a593Smuzhiyun 	bsg_reply->result = rc;
5373*4882a593Smuzhiyun 	if (rc == 0)
5374*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5375*4882a593Smuzhiyun 			       bsg_reply->reply_payload_rcv_len);
5376*4882a593Smuzhiyun 	return rc;
5377*4882a593Smuzhiyun }
5378*4882a593Smuzhiyun 
5379*4882a593Smuzhiyun /**
5380*4882a593Smuzhiyun  * lpfc_check_fwlog_support: Check FW log support on the adapter
5381*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
5382*4882a593Smuzhiyun  *
5383*4882a593Smuzhiyun  * Check if FW Logging support by the adapter
5384*4882a593Smuzhiyun  **/
5385*4882a593Smuzhiyun int
lpfc_check_fwlog_support(struct lpfc_hba * phba)5386*4882a593Smuzhiyun lpfc_check_fwlog_support(struct lpfc_hba *phba)
5387*4882a593Smuzhiyun {
5388*4882a593Smuzhiyun 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5389*4882a593Smuzhiyun 
5390*4882a593Smuzhiyun 	ras_fwlog = &phba->ras_fwlog;
5391*4882a593Smuzhiyun 
5392*4882a593Smuzhiyun 	if (ras_fwlog->ras_hwsupport == false)
5393*4882a593Smuzhiyun 		return -EACCES;
5394*4882a593Smuzhiyun 	else if (ras_fwlog->ras_enabled == false)
5395*4882a593Smuzhiyun 		return -EPERM;
5396*4882a593Smuzhiyun 	else
5397*4882a593Smuzhiyun 		return 0;
5398*4882a593Smuzhiyun }
5399*4882a593Smuzhiyun 
5400*4882a593Smuzhiyun /**
5401*4882a593Smuzhiyun  * lpfc_bsg_get_ras_config: Get RAS configuration settings
5402*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5403*4882a593Smuzhiyun  *
5404*4882a593Smuzhiyun  * Get RAS configuration values set.
5405*4882a593Smuzhiyun  **/
5406*4882a593Smuzhiyun static int
lpfc_bsg_get_ras_config(struct bsg_job * job)5407*4882a593Smuzhiyun lpfc_bsg_get_ras_config(struct bsg_job *job)
5408*4882a593Smuzhiyun {
5409*4882a593Smuzhiyun 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5410*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(shost);
5411*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5412*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5413*4882a593Smuzhiyun 	struct lpfc_bsg_get_ras_config_reply *ras_reply;
5414*4882a593Smuzhiyun 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5415*4882a593Smuzhiyun 	int rc = 0;
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun 	if (job->request_len <
5418*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) +
5419*4882a593Smuzhiyun 	    sizeof(struct lpfc_bsg_ras_req)) {
5420*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5421*4882a593Smuzhiyun 				"6192 FW_LOG request received "
5422*4882a593Smuzhiyun 				"below minimum size\n");
5423*4882a593Smuzhiyun 		rc = -EINVAL;
5424*4882a593Smuzhiyun 		goto ras_job_error;
5425*4882a593Smuzhiyun 	}
5426*4882a593Smuzhiyun 
5427*4882a593Smuzhiyun 	/* Check FW log status */
5428*4882a593Smuzhiyun 	rc = lpfc_check_fwlog_support(phba);
5429*4882a593Smuzhiyun 	if (rc)
5430*4882a593Smuzhiyun 		goto ras_job_error;
5431*4882a593Smuzhiyun 
5432*4882a593Smuzhiyun 	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5433*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5434*4882a593Smuzhiyun 
5435*4882a593Smuzhiyun 	/* Current logging state */
5436*4882a593Smuzhiyun 	spin_lock_irq(&phba->hbalock);
5437*4882a593Smuzhiyun 	if (ras_fwlog->state == ACTIVE)
5438*4882a593Smuzhiyun 		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5439*4882a593Smuzhiyun 	else
5440*4882a593Smuzhiyun 		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5441*4882a593Smuzhiyun 	spin_unlock_irq(&phba->hbalock);
5442*4882a593Smuzhiyun 
5443*4882a593Smuzhiyun 	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5444*4882a593Smuzhiyun 	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5445*4882a593Smuzhiyun 
5446*4882a593Smuzhiyun ras_job_error:
5447*4882a593Smuzhiyun 	/* make error code available to userspace */
5448*4882a593Smuzhiyun 	bsg_reply->result = rc;
5449*4882a593Smuzhiyun 
5450*4882a593Smuzhiyun 	/* complete the job back to userspace */
5451*4882a593Smuzhiyun 	if (!rc)
5452*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5453*4882a593Smuzhiyun 			     bsg_reply->reply_payload_rcv_len);
5454*4882a593Smuzhiyun 	return rc;
5455*4882a593Smuzhiyun }
5456*4882a593Smuzhiyun 
5457*4882a593Smuzhiyun /**
5458*4882a593Smuzhiyun  * lpfc_bsg_set_ras_config: Set FW logging parameters
5459*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5460*4882a593Smuzhiyun  *
5461*4882a593Smuzhiyun  * Set log-level parameters for FW-logging in host memory
5462*4882a593Smuzhiyun  **/
static int
lpfc_bsg_set_ras_config(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_set_ras_config_req *ras_req;
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint8_t action = 0, log_level = 0;
	int rc = 0, action_status = 0;

	/* Request must be large enough to hold the set-config structure */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6182 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	/* Check FW log status */
	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	ras_req = (struct lpfc_bsg_set_ras_config_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	action = ras_req->action;
	log_level = ras_req->log_level;

	if (action == LPFC_RASACTION_STOP_LOGGING) {
		/* Check if already disabled */
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != ACTIVE) {
			spin_unlock_irq(&phba->hbalock);
			rc = -ESRCH;
			goto ras_job_error;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Disable logging */
		lpfc_ras_stop_fwlog(phba);
	} else {
		/*action = LPFC_RASACTION_START_LOGGING*/

		/* Even though FW-logging is active re-initialize
		 * FW-logging with new log-level. Return status
		 * "Logging already Running" to caller.
		 **/
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != INACTIVE)
			action_status = -EINPROGRESS;
		spin_unlock_irq(&phba->hbalock);

		/* Enable logging */
		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
					      LPFC_RAS_ENABLE_LOGGING);
		if (rc) {
			rc = -EINVAL;
			goto ras_job_error;
		}

		/* Check if FW-logging is re-initialized: on success, report
		 * -EINPROGRESS so the caller knows logging was already on.
		 */
		if (action_status == -EINPROGRESS)
			rc = action_status;
	}
ras_job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;

	/* complete the job back to userspace */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}
5543*4882a593Smuzhiyun 
5544*4882a593Smuzhiyun /**
5545*4882a593Smuzhiyun  * lpfc_bsg_get_ras_lwpd: Get log write position data
5546*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5547*4882a593Smuzhiyun  *
5548*4882a593Smuzhiyun  * Get Offset/Wrap count of the log message written
5549*4882a593Smuzhiyun  * in host memory
5550*4882a593Smuzhiyun  **/
5551*4882a593Smuzhiyun static int
lpfc_bsg_get_ras_lwpd(struct bsg_job * job)5552*4882a593Smuzhiyun lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5553*4882a593Smuzhiyun {
5554*4882a593Smuzhiyun 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5555*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(shost);
5556*4882a593Smuzhiyun 	struct lpfc_bsg_get_ras_lwpd *ras_reply;
5557*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5558*4882a593Smuzhiyun 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5559*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5560*4882a593Smuzhiyun 	u32 *lwpd_ptr = NULL;
5561*4882a593Smuzhiyun 	int rc = 0;
5562*4882a593Smuzhiyun 
5563*4882a593Smuzhiyun 	rc = lpfc_check_fwlog_support(phba);
5564*4882a593Smuzhiyun 	if (rc)
5565*4882a593Smuzhiyun 		goto ras_job_error;
5566*4882a593Smuzhiyun 
5567*4882a593Smuzhiyun 	if (job->request_len <
5568*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) +
5569*4882a593Smuzhiyun 	    sizeof(struct lpfc_bsg_ras_req)) {
5570*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5571*4882a593Smuzhiyun 				"6183 Received RAS_LOG request "
5572*4882a593Smuzhiyun 				"below minimum size\n");
5573*4882a593Smuzhiyun 		rc = -EINVAL;
5574*4882a593Smuzhiyun 		goto ras_job_error;
5575*4882a593Smuzhiyun 	}
5576*4882a593Smuzhiyun 
5577*4882a593Smuzhiyun 	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5578*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5579*4882a593Smuzhiyun 
5580*4882a593Smuzhiyun 	if (!ras_fwlog->lwpd.virt) {
5581*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5582*4882a593Smuzhiyun 				"6193 Restart FW Logging\n");
5583*4882a593Smuzhiyun 		rc = -EINVAL;
5584*4882a593Smuzhiyun 		goto ras_job_error;
5585*4882a593Smuzhiyun 	}
5586*4882a593Smuzhiyun 
5587*4882a593Smuzhiyun 	/* Get lwpd offset */
5588*4882a593Smuzhiyun 	lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5589*4882a593Smuzhiyun 	ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5590*4882a593Smuzhiyun 
5591*4882a593Smuzhiyun 	/* Get wrap count */
5592*4882a593Smuzhiyun 	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5593*4882a593Smuzhiyun 
5594*4882a593Smuzhiyun ras_job_error:
5595*4882a593Smuzhiyun 	/* make error code available to userspace */
5596*4882a593Smuzhiyun 	bsg_reply->result = rc;
5597*4882a593Smuzhiyun 
5598*4882a593Smuzhiyun 	/* complete the job back to userspace */
5599*4882a593Smuzhiyun 	if (!rc)
5600*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5601*4882a593Smuzhiyun 			     bsg_reply->reply_payload_rcv_len);
5602*4882a593Smuzhiyun 
5603*4882a593Smuzhiyun 	return rc;
5604*4882a593Smuzhiyun }
5605*4882a593Smuzhiyun 
5606*4882a593Smuzhiyun /**
5607*4882a593Smuzhiyun  * lpfc_bsg_get_ras_fwlog: Read FW log
5608*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5609*4882a593Smuzhiyun  *
5610*4882a593Smuzhiyun  * Copy the FW log into the passed buffer.
5611*4882a593Smuzhiyun  **/
5612*4882a593Smuzhiyun static int
lpfc_bsg_get_ras_fwlog(struct bsg_job * job)5613*4882a593Smuzhiyun lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5614*4882a593Smuzhiyun {
5615*4882a593Smuzhiyun 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5616*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(shost);
5617*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5618*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
5619*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5620*4882a593Smuzhiyun 	struct lpfc_bsg_get_fwlog_req *ras_req;
5621*4882a593Smuzhiyun 	u32 rd_offset, rd_index, offset;
5622*4882a593Smuzhiyun 	void *src, *fwlog_buff;
5623*4882a593Smuzhiyun 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5624*4882a593Smuzhiyun 	struct lpfc_dmabuf *dmabuf, *next;
5625*4882a593Smuzhiyun 	int rc = 0;
5626*4882a593Smuzhiyun 
5627*4882a593Smuzhiyun 	ras_fwlog = &phba->ras_fwlog;
5628*4882a593Smuzhiyun 
5629*4882a593Smuzhiyun 	rc = lpfc_check_fwlog_support(phba);
5630*4882a593Smuzhiyun 	if (rc)
5631*4882a593Smuzhiyun 		goto ras_job_error;
5632*4882a593Smuzhiyun 
5633*4882a593Smuzhiyun 	/* Logging to be stopped before reading */
5634*4882a593Smuzhiyun 	spin_lock_irq(&phba->hbalock);
5635*4882a593Smuzhiyun 	if (ras_fwlog->state == ACTIVE) {
5636*4882a593Smuzhiyun 		spin_unlock_irq(&phba->hbalock);
5637*4882a593Smuzhiyun 		rc = -EINPROGRESS;
5638*4882a593Smuzhiyun 		goto ras_job_error;
5639*4882a593Smuzhiyun 	}
5640*4882a593Smuzhiyun 	spin_unlock_irq(&phba->hbalock);
5641*4882a593Smuzhiyun 
5642*4882a593Smuzhiyun 	if (job->request_len <
5643*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) +
5644*4882a593Smuzhiyun 	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
5645*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5646*4882a593Smuzhiyun 				"6184 Received RAS_LOG request "
5647*4882a593Smuzhiyun 				"below minimum size\n");
5648*4882a593Smuzhiyun 		rc = -EINVAL;
5649*4882a593Smuzhiyun 		goto ras_job_error;
5650*4882a593Smuzhiyun 	}
5651*4882a593Smuzhiyun 
5652*4882a593Smuzhiyun 	ras_req = (struct lpfc_bsg_get_fwlog_req *)
5653*4882a593Smuzhiyun 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5654*4882a593Smuzhiyun 	rd_offset = ras_req->read_offset;
5655*4882a593Smuzhiyun 
5656*4882a593Smuzhiyun 	/* Allocate memory to read fw log*/
5657*4882a593Smuzhiyun 	fwlog_buff = vmalloc(ras_req->read_size);
5658*4882a593Smuzhiyun 	if (!fwlog_buff) {
5659*4882a593Smuzhiyun 		rc = -ENOMEM;
5660*4882a593Smuzhiyun 		goto ras_job_error;
5661*4882a593Smuzhiyun 	}
5662*4882a593Smuzhiyun 
5663*4882a593Smuzhiyun 	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5664*4882a593Smuzhiyun 	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5665*4882a593Smuzhiyun 
5666*4882a593Smuzhiyun 	list_for_each_entry_safe(dmabuf, next,
5667*4882a593Smuzhiyun 			      &ras_fwlog->fwlog_buff_list, list) {
5668*4882a593Smuzhiyun 
5669*4882a593Smuzhiyun 		if (dmabuf->buffer_tag < rd_index)
5670*4882a593Smuzhiyun 			continue;
5671*4882a593Smuzhiyun 
5672*4882a593Smuzhiyun 		src = dmabuf->virt + offset;
5673*4882a593Smuzhiyun 		memcpy(fwlog_buff, src, ras_req->read_size);
5674*4882a593Smuzhiyun 		break;
5675*4882a593Smuzhiyun 	}
5676*4882a593Smuzhiyun 
5677*4882a593Smuzhiyun 	bsg_reply->reply_payload_rcv_len =
5678*4882a593Smuzhiyun 		sg_copy_from_buffer(job->reply_payload.sg_list,
5679*4882a593Smuzhiyun 				    job->reply_payload.sg_cnt,
5680*4882a593Smuzhiyun 				    fwlog_buff, ras_req->read_size);
5681*4882a593Smuzhiyun 
5682*4882a593Smuzhiyun 	vfree(fwlog_buff);
5683*4882a593Smuzhiyun 
5684*4882a593Smuzhiyun ras_job_error:
5685*4882a593Smuzhiyun 	bsg_reply->result = rc;
5686*4882a593Smuzhiyun 	if (!rc)
5687*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5688*4882a593Smuzhiyun 			     bsg_reply->reply_payload_rcv_len);
5689*4882a593Smuzhiyun 
5690*4882a593Smuzhiyun 	return rc;
5691*4882a593Smuzhiyun }
5692*4882a593Smuzhiyun 
5693*4882a593Smuzhiyun static int
lpfc_get_trunk_info(struct bsg_job * job)5694*4882a593Smuzhiyun lpfc_get_trunk_info(struct bsg_job *job)
5695*4882a593Smuzhiyun {
5696*4882a593Smuzhiyun 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5697*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
5698*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5699*4882a593Smuzhiyun 	struct lpfc_trunk_info *event_reply;
5700*4882a593Smuzhiyun 	int rc = 0;
5701*4882a593Smuzhiyun 
5702*4882a593Smuzhiyun 	if (job->request_len <
5703*4882a593Smuzhiyun 	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5704*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5705*4882a593Smuzhiyun 				"2744 Received GET TRUNK _INFO request below "
5706*4882a593Smuzhiyun 				"minimum size\n");
5707*4882a593Smuzhiyun 		rc = -EINVAL;
5708*4882a593Smuzhiyun 		goto job_error;
5709*4882a593Smuzhiyun 	}
5710*4882a593Smuzhiyun 
5711*4882a593Smuzhiyun 	event_reply = (struct lpfc_trunk_info *)
5712*4882a593Smuzhiyun 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5713*4882a593Smuzhiyun 
5714*4882a593Smuzhiyun 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5715*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5716*4882a593Smuzhiyun 				"2728 Received GET TRUNK _INFO reply below "
5717*4882a593Smuzhiyun 				"minimum size\n");
5718*4882a593Smuzhiyun 		rc = -EINVAL;
5719*4882a593Smuzhiyun 		goto job_error;
5720*4882a593Smuzhiyun 	}
5721*4882a593Smuzhiyun 	if (event_reply == NULL) {
5722*4882a593Smuzhiyun 		rc = -EINVAL;
5723*4882a593Smuzhiyun 		goto job_error;
5724*4882a593Smuzhiyun 	}
5725*4882a593Smuzhiyun 
5726*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5727*4882a593Smuzhiyun 		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5728*4882a593Smuzhiyun 
5729*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5730*4882a593Smuzhiyun 		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5731*4882a593Smuzhiyun 
5732*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5733*4882a593Smuzhiyun 		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5734*4882a593Smuzhiyun 
5735*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5736*4882a593Smuzhiyun 		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5737*4882a593Smuzhiyun 
5738*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5739*4882a593Smuzhiyun 		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5740*4882a593Smuzhiyun 
5741*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5742*4882a593Smuzhiyun 		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5743*4882a593Smuzhiyun 
5744*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5745*4882a593Smuzhiyun 		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5746*4882a593Smuzhiyun 
5747*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5748*4882a593Smuzhiyun 		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5749*4882a593Smuzhiyun 
5750*4882a593Smuzhiyun 	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5751*4882a593Smuzhiyun 		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5752*4882a593Smuzhiyun 
5753*4882a593Smuzhiyun 	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5754*4882a593Smuzhiyun 	event_reply->logical_speed =
5755*4882a593Smuzhiyun 				phba->sli4_hba.link_state.logical_speed / 1000;
5756*4882a593Smuzhiyun job_error:
5757*4882a593Smuzhiyun 	bsg_reply->result = rc;
5758*4882a593Smuzhiyun 	if (!rc)
5759*4882a593Smuzhiyun 		bsg_job_done(job, bsg_reply->result,
5760*4882a593Smuzhiyun 			     bsg_reply->reply_payload_rcv_len);
5761*4882a593Smuzhiyun 	return rc;
5762*4882a593Smuzhiyun 
5763*4882a593Smuzhiyun }
5764*4882a593Smuzhiyun 
5765*4882a593Smuzhiyun /**
5766*4882a593Smuzhiyun  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5767*4882a593Smuzhiyun  * @job: fc_bsg_job to handle
5768*4882a593Smuzhiyun  **/
5769*4882a593Smuzhiyun static int
lpfc_bsg_hst_vendor(struct bsg_job * job)5770*4882a593Smuzhiyun lpfc_bsg_hst_vendor(struct bsg_job *job)
5771*4882a593Smuzhiyun {
5772*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
5773*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5774*4882a593Smuzhiyun 	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5775*4882a593Smuzhiyun 	int rc;
5776*4882a593Smuzhiyun 
5777*4882a593Smuzhiyun 	switch (command) {
5778*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5779*4882a593Smuzhiyun 		rc = lpfc_bsg_hba_set_event(job);
5780*4882a593Smuzhiyun 		break;
5781*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5782*4882a593Smuzhiyun 		rc = lpfc_bsg_hba_get_event(job);
5783*4882a593Smuzhiyun 		break;
5784*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5785*4882a593Smuzhiyun 		rc = lpfc_bsg_send_mgmt_rsp(job);
5786*4882a593Smuzhiyun 		break;
5787*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_DIAG_MODE:
5788*4882a593Smuzhiyun 		rc = lpfc_bsg_diag_loopback_mode(job);
5789*4882a593Smuzhiyun 		break;
5790*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5791*4882a593Smuzhiyun 		rc = lpfc_sli4_bsg_diag_mode_end(job);
5792*4882a593Smuzhiyun 		break;
5793*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5794*4882a593Smuzhiyun 		rc = lpfc_bsg_diag_loopback_run(job);
5795*4882a593Smuzhiyun 		break;
5796*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5797*4882a593Smuzhiyun 		rc = lpfc_sli4_bsg_link_diag_test(job);
5798*4882a593Smuzhiyun 		break;
5799*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5800*4882a593Smuzhiyun 		rc = lpfc_bsg_get_dfc_rev(job);
5801*4882a593Smuzhiyun 		break;
5802*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_MBOX:
5803*4882a593Smuzhiyun 		rc = lpfc_bsg_mbox_cmd(job);
5804*4882a593Smuzhiyun 		break;
5805*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_MENLO_CMD:
5806*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_MENLO_DATA:
5807*4882a593Smuzhiyun 		rc = lpfc_menlo_cmd(job);
5808*4882a593Smuzhiyun 		break;
5809*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5810*4882a593Smuzhiyun 		rc = lpfc_forced_link_speed(job);
5811*4882a593Smuzhiyun 		break;
5812*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5813*4882a593Smuzhiyun 		rc = lpfc_bsg_get_ras_lwpd(job);
5814*4882a593Smuzhiyun 		break;
5815*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5816*4882a593Smuzhiyun 		rc = lpfc_bsg_get_ras_fwlog(job);
5817*4882a593Smuzhiyun 		break;
5818*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5819*4882a593Smuzhiyun 		rc = lpfc_bsg_get_ras_config(job);
5820*4882a593Smuzhiyun 		break;
5821*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5822*4882a593Smuzhiyun 		rc = lpfc_bsg_set_ras_config(job);
5823*4882a593Smuzhiyun 		break;
5824*4882a593Smuzhiyun 	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5825*4882a593Smuzhiyun 		rc = lpfc_get_trunk_info(job);
5826*4882a593Smuzhiyun 		break;
5827*4882a593Smuzhiyun 	default:
5828*4882a593Smuzhiyun 		rc = -EINVAL;
5829*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len = 0;
5830*4882a593Smuzhiyun 		/* make error code available to userspace */
5831*4882a593Smuzhiyun 		bsg_reply->result = rc;
5832*4882a593Smuzhiyun 		break;
5833*4882a593Smuzhiyun 	}
5834*4882a593Smuzhiyun 
5835*4882a593Smuzhiyun 	return rc;
5836*4882a593Smuzhiyun }
5837*4882a593Smuzhiyun 
5838*4882a593Smuzhiyun /**
5839*4882a593Smuzhiyun  * lpfc_bsg_request - handle a bsg request from the FC transport
5840*4882a593Smuzhiyun  * @job: bsg_job to handle
5841*4882a593Smuzhiyun  **/
5842*4882a593Smuzhiyun int
lpfc_bsg_request(struct bsg_job * job)5843*4882a593Smuzhiyun lpfc_bsg_request(struct bsg_job *job)
5844*4882a593Smuzhiyun {
5845*4882a593Smuzhiyun 	struct fc_bsg_request *bsg_request = job->request;
5846*4882a593Smuzhiyun 	struct fc_bsg_reply *bsg_reply = job->reply;
5847*4882a593Smuzhiyun 	uint32_t msgcode;
5848*4882a593Smuzhiyun 	int rc;
5849*4882a593Smuzhiyun 
5850*4882a593Smuzhiyun 	msgcode = bsg_request->msgcode;
5851*4882a593Smuzhiyun 	switch (msgcode) {
5852*4882a593Smuzhiyun 	case FC_BSG_HST_VENDOR:
5853*4882a593Smuzhiyun 		rc = lpfc_bsg_hst_vendor(job);
5854*4882a593Smuzhiyun 		break;
5855*4882a593Smuzhiyun 	case FC_BSG_RPT_ELS:
5856*4882a593Smuzhiyun 		rc = lpfc_bsg_rport_els(job);
5857*4882a593Smuzhiyun 		break;
5858*4882a593Smuzhiyun 	case FC_BSG_RPT_CT:
5859*4882a593Smuzhiyun 		rc = lpfc_bsg_send_mgmt_cmd(job);
5860*4882a593Smuzhiyun 		break;
5861*4882a593Smuzhiyun 	default:
5862*4882a593Smuzhiyun 		rc = -EINVAL;
5863*4882a593Smuzhiyun 		bsg_reply->reply_payload_rcv_len = 0;
5864*4882a593Smuzhiyun 		/* make error code available to userspace */
5865*4882a593Smuzhiyun 		bsg_reply->result = rc;
5866*4882a593Smuzhiyun 		break;
5867*4882a593Smuzhiyun 	}
5868*4882a593Smuzhiyun 
5869*4882a593Smuzhiyun 	return rc;
5870*4882a593Smuzhiyun }
5871*4882a593Smuzhiyun 
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 *
 * Return: 0 on all handled paths (the transport expects zero so no console
 * error is printed), -EIO if the ELS ring is gone, -EAGAIN if the command
 * is already completing (so the transport retries the timeout later).
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	/* Aborts are issued on the ELS ring; bail if it no longer exists */
	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/* if job's driver data is NULL, the command completed or is in the
	 * the process of completing.  In this case, return status to request
	 * so the timeout is retried.  This avoids double completion issues
	 * and the request will be pulled off the timer queue when the
	 * command's completion handler executes.  Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding the command.
	 */

	/* ct_ev_lock serializes against the completion handler's access to
	 * dd_data; clearing both links below detaches job from the command.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		/* ct_ev_lock must be dropped before taking hbalock */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		/* Still queued on txq (never issued): pull it for cancel */
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		/* cancel outside hbalock: completes the pulled iocb with
		 * LOCAL_REJECT/SLI_ABORTED status
		 */
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		/* event jobs have no outstanding IOCB to abort */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */

		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		/* same txq-search/abort sequence as TYPE_IOCB, but without
		 * the abort-window check (Menlo path does not set
		 * LPFC_IO_CMD_OUTSTANDING)
		 */
		spin_lock_irqsave(&phba->hbalock, flags);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return rc;
}
5994