xref: /OK3568_Linux_fs/kernel/drivers/scsi/lpfc/lpfc_nvmet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*******************************************************************
2*4882a593Smuzhiyun  * This file is part of the Emulex Linux Device Driver for         *
3*4882a593Smuzhiyun  * Fibre Channel Host Bus Adapters.                                *
4*4882a593Smuzhiyun  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5*4882a593Smuzhiyun  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6*4882a593Smuzhiyun  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7*4882a593Smuzhiyun  * EMULEX and SLI are trademarks of Emulex.                        *
8*4882a593Smuzhiyun  * www.broadcom.com                                                *
9*4882a593Smuzhiyun  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10*4882a593Smuzhiyun  *                                                                 *
11*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or   *
12*4882a593Smuzhiyun  * modify it under the terms of version 2 of the GNU General       *
13*4882a593Smuzhiyun  * Public License as published by the Free Software Foundation.    *
14*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful. *
15*4882a593Smuzhiyun  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16*4882a593Smuzhiyun  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18*4882a593Smuzhiyun  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19*4882a593Smuzhiyun  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20*4882a593Smuzhiyun  * more details, a copy of which can be found in the file COPYING  *
21*4882a593Smuzhiyun  * included with this package.                                     *
22*4882a593Smuzhiyun  ********************************************************************/
23*4882a593Smuzhiyun #include <linux/pci.h>
24*4882a593Smuzhiyun #include <linux/slab.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/delay.h>
27*4882a593Smuzhiyun #include <asm/unaligned.h>
28*4882a593Smuzhiyun #include <linux/crc-t10dif.h>
29*4882a593Smuzhiyun #include <net/checksum.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <scsi/scsi.h>
32*4882a593Smuzhiyun #include <scsi/scsi_device.h>
33*4882a593Smuzhiyun #include <scsi/scsi_eh.h>
34*4882a593Smuzhiyun #include <scsi/scsi_host.h>
35*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
36*4882a593Smuzhiyun #include <scsi/scsi_transport_fc.h>
37*4882a593Smuzhiyun #include <scsi/fc/fc_fs.h>
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #include "lpfc_version.h"
40*4882a593Smuzhiyun #include "lpfc_hw4.h"
41*4882a593Smuzhiyun #include "lpfc_hw.h"
42*4882a593Smuzhiyun #include "lpfc_sli.h"
43*4882a593Smuzhiyun #include "lpfc_sli4.h"
44*4882a593Smuzhiyun #include "lpfc_nl.h"
45*4882a593Smuzhiyun #include "lpfc_disc.h"
46*4882a593Smuzhiyun #include "lpfc.h"
47*4882a593Smuzhiyun #include "lpfc_scsi.h"
48*4882a593Smuzhiyun #include "lpfc_nvme.h"
49*4882a593Smuzhiyun #include "lpfc_logmsg.h"
50*4882a593Smuzhiyun #include "lpfc_crtn.h"
51*4882a593Smuzhiyun #include "lpfc_vport.h"
52*4882a593Smuzhiyun #include "lpfc_debugfs.h"
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
55*4882a593Smuzhiyun 						 struct lpfc_async_xchg_ctx *,
56*4882a593Smuzhiyun 						 dma_addr_t rspbuf,
57*4882a593Smuzhiyun 						 uint16_t rspsize);
58*4882a593Smuzhiyun static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
59*4882a593Smuzhiyun 						  struct lpfc_async_xchg_ctx *);
60*4882a593Smuzhiyun static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
61*4882a593Smuzhiyun 					  struct lpfc_async_xchg_ctx *,
62*4882a593Smuzhiyun 					  uint32_t, uint16_t);
63*4882a593Smuzhiyun static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
64*4882a593Smuzhiyun 					    struct lpfc_async_xchg_ctx *,
65*4882a593Smuzhiyun 					    uint32_t, uint16_t);
66*4882a593Smuzhiyun static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
67*4882a593Smuzhiyun 				    struct lpfc_async_xchg_ctx *);
68*4882a593Smuzhiyun static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
71*4882a593Smuzhiyun 
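/*
 * Pre-initialized 128-byte WQE templates for the NVMET TSEND, TRECEIVE and
 * TRSP commands.  They appear to be copied into each IO's WQE by the prep
 * routines (not shown in this excerpt) and then patched with the per-IO
 * fields that the comments below mark as "variable".
 */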
72*4882a593Smuzhiyun static union lpfc_wqe128 lpfc_tsend_cmd_template;
73*4882a593Smuzhiyun static union lpfc_wqe128 lpfc_treceive_cmd_template;
74*4882a593Smuzhiyun static union lpfc_wqe128 lpfc_trsp_cmd_template;
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun /* Setup WQE templates for NVME IOs */
77*4882a593Smuzhiyun void
78*4882a593Smuzhiyun lpfc_nvmet_cmd_template(void)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	union lpfc_wqe128 *wqe;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	/* TSEND template */
83*4882a593Smuzhiyun 	wqe = &lpfc_tsend_cmd_template;
84*4882a593Smuzhiyun 	memset(wqe, 0, sizeof(union lpfc_wqe128));
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	/* Word 0, 1, 2 - BDE is variable */
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	/* Word 3 - payload_offset_len is zero */
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	/* Word 4 - relative_offset is variable */
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	/* Word 5 - is zero */
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	/* Word 6 - ctxt_tag, xri_tag is variable */
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	/* Word 7 - wqe_ar is variable */
97*4882a593Smuzhiyun 	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
98*4882a593Smuzhiyun 	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
99*4882a593Smuzhiyun 	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
100*4882a593Smuzhiyun 	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
101*4882a593Smuzhiyun 	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	/* Word 8 - abort_tag is variable */
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	/* Word 9  - reqtag, rcvoxid is variable */
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	/* Word 10 - wqes, xc is variable */
108*4882a593Smuzhiyun 	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
109*4882a593Smuzhiyun 	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
110*4882a593Smuzhiyun 	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
111*4882a593Smuzhiyun 	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
112*4882a593Smuzhiyun 	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
113*4882a593Smuzhiyun 	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	/* Word 11 - sup, irsp, irsplen is variable */
116*4882a593Smuzhiyun 	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
117*4882a593Smuzhiyun 	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
118*4882a593Smuzhiyun 	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
119*4882a593Smuzhiyun 	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
120*4882a593Smuzhiyun 	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
121*4882a593Smuzhiyun 	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	/* Word 12 - fcp_data_len is variable */
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	/* Word 13, 14, 15 - PBDE is zero */
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	/* TRECEIVE template */
128*4882a593Smuzhiyun 	wqe = &lpfc_treceive_cmd_template;
129*4882a593Smuzhiyun 	memset(wqe, 0, sizeof(union lpfc_wqe128));
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	/* Word 0, 1, 2 - BDE is variable */
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	/* Word 3 */
134*4882a593Smuzhiyun 	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	/* Word 4 - relative_offset is variable */
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	/* Word 5 - is zero */
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	/* Word 6 - ctxt_tag, xri_tag is variable */
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	/* Word 7 */
143*4882a593Smuzhiyun 	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
144*4882a593Smuzhiyun 	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
145*4882a593Smuzhiyun 	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
146*4882a593Smuzhiyun 	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
147*4882a593Smuzhiyun 	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	/* Word 8 - abort_tag is variable */
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	/* Word 9  - reqtag, rcvoxid is variable */
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/* Word 10 - xc is variable */
154*4882a593Smuzhiyun 	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
155*4882a593Smuzhiyun 	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
156*4882a593Smuzhiyun 	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
157*4882a593Smuzhiyun 	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
158*4882a593Smuzhiyun 	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
159*4882a593Smuzhiyun 	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	/* Word 11 - pbde is variable */
162*4882a593Smuzhiyun 	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
163*4882a593Smuzhiyun 	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
164*4882a593Smuzhiyun 	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
165*4882a593Smuzhiyun 	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
166*4882a593Smuzhiyun 	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
167*4882a593Smuzhiyun 	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	/* Word 12 - fcp_data_len is variable */
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	/* Word 13, 14, 15 - PBDE is variable */
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	/* TRSP template */
174*4882a593Smuzhiyun 	wqe = &lpfc_trsp_cmd_template;
175*4882a593Smuzhiyun 	memset(wqe, 0, sizeof(union lpfc_wqe128));
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	/* Word 0, 1, 2 - BDE is variable */
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	/* Word 3 - response_len is variable */
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	/* Word 4, 5 - is zero */
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	/* Word 6 - ctxt_tag, xri_tag is variable */
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	/* Word 7 */
186*4882a593Smuzhiyun 	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
187*4882a593Smuzhiyun 	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
188*4882a593Smuzhiyun 	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
189*4882a593Smuzhiyun 	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
190*4882a593Smuzhiyun 	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	/* Word 8 - abort_tag is variable */
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	/* Word 9  - reqtag is variable */
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	/* Word 10 wqes, xc is variable */
197*4882a593Smuzhiyun 	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
198*4882a593Smuzhiyun 	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
199*4882a593Smuzhiyun 	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
200*4882a593Smuzhiyun 	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
201*4882a593Smuzhiyun 	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
202*4882a593Smuzhiyun 	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	/* Word 11 irsp, irsplen is variable */
205*4882a593Smuzhiyun 	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
206*4882a593Smuzhiyun 	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
207*4882a593Smuzhiyun 	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
208*4882a593Smuzhiyun 	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
209*4882a593Smuzhiyun 	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
210*4882a593Smuzhiyun 	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	/* Word 12, 13, 14, 15 - is zero */
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
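/*
 * lpfc_nvmet_get_ctx_for_xri - find an active exchange context by XRI
 *
 * Walks the t_active_ctx_list under t_active_list_lock and returns the
 * context whose sglq XRI matches @xri, or NULL if no match is found.
 */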
216*4882a593Smuzhiyun static struct lpfc_async_xchg_ctx *
217*4882a593Smuzhiyun lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
220*4882a593Smuzhiyun 	unsigned long iflag;
221*4882a593Smuzhiyun 	bool found = false;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
224*4882a593Smuzhiyun 	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225*4882a593Smuzhiyun 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
226*4882a593Smuzhiyun 			continue;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 		found = true;
229*4882a593Smuzhiyun 		break;
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
232*4882a593Smuzhiyun 	if (found)
233*4882a593Smuzhiyun 		return ctxp;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	return NULL;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
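/*
 * lpfc_nvmet_get_ctx_for_oxid - find an active exchange context by OX_ID/SID
 *
 * Walks the t_active_ctx_list under t_active_list_lock and returns the
 * context whose oxid and sid both match, or NULL if no match is found.
 */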
238*4882a593Smuzhiyun static struct lpfc_async_xchg_ctx *
239*4882a593Smuzhiyun lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
242*4882a593Smuzhiyun 	unsigned long iflag;
243*4882a593Smuzhiyun 	bool found = false;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
246*4882a593Smuzhiyun 	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247*4882a593Smuzhiyun 		if (ctxp->oxid != oxid || ctxp->sid != sid)
248*4882a593Smuzhiyun 			continue;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 		found = true;
251*4882a593Smuzhiyun 		break;
252*4882a593Smuzhiyun 	}
253*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
254*4882a593Smuzhiyun 	if (found)
255*4882a593Smuzhiyun 		return ctxp;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	return NULL;
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun #endif
260*4882a593Smuzhiyun 
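/*
 * lpfc_nvmet_defer_release - defer final release of an exchange context
 *
 * Called with ctxp->ctxlock held.  Sets LPFC_NVME_CTX_RLS (if not already
 * set) and moves the context from the active list to the ABTS context list
 * so its release is deferred until the aborted exchange is resolved.
 */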
261*4882a593Smuzhiyun static void
262*4882a593Smuzhiyun lpfc_nvmet_defer_release(struct lpfc_hba *phba,
263*4882a593Smuzhiyun 			struct lpfc_async_xchg_ctx *ctxp)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	lockdep_assert_held(&ctxp->ctxlock);
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
268*4882a593Smuzhiyun 			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
269*4882a593Smuzhiyun 			ctxp->oxid, ctxp->flag);
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_CTX_RLS)
272*4882a593Smuzhiyun 		return;
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	ctxp->flag |= LPFC_NVME_CTX_RLS;
275*4882a593Smuzhiyun 	spin_lock(&phba->sli4_hba.t_active_list_lock);
276*4882a593Smuzhiyun 	list_del(&ctxp->list);
277*4882a593Smuzhiyun 	spin_unlock(&phba->sli4_hba.t_active_list_lock);
278*4882a593Smuzhiyun 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
279*4882a593Smuzhiyun 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
280*4882a593Smuzhiyun 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun /**
284*4882a593Smuzhiyun  * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
285*4882a593Smuzhiyun  *         transmission of an NVME LS response.
286*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
287*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
288*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
289*4882a593Smuzhiyun  *
290*4882a593Smuzhiyun  * The function is called from SLI ring event handler with no
291*4882a593Smuzhiyun  * lock held. The function frees memory resources used for the command
292*4882a593Smuzhiyun  * used to send the NVME LS RSP.
293*4882a593Smuzhiyun  **/
294*4882a593Smuzhiyun void
295*4882a593Smuzhiyun __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
296*4882a593Smuzhiyun 			   struct lpfc_wcqe_complete *wcqe)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
299*4882a593Smuzhiyun 	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
300*4882a593Smuzhiyun 	uint32_t status, result;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
303*4882a593Smuzhiyun 	result = wcqe->parameter;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
306*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
307*4882a593Smuzhiyun 				"6410 NVMEx LS cmpl state mismatch IO x%x: "
308*4882a593Smuzhiyun 				"%d %d\n",
309*4882a593Smuzhiyun 				axchg->oxid, axchg->state, axchg->entry_cnt);
310*4882a593Smuzhiyun 	}
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMEx LS  CMPL: xri x%x stat x%x result x%x\n",
313*4882a593Smuzhiyun 			 axchg->oxid, status, result);
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
316*4882a593Smuzhiyun 			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
317*4882a593Smuzhiyun 			status, result, axchg->oxid);
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	lpfc_nlp_put(cmdwqe->context1);
320*4882a593Smuzhiyun 	cmdwqe->context2 = NULL;
321*4882a593Smuzhiyun 	cmdwqe->context3 = NULL;
322*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, cmdwqe);
323*4882a593Smuzhiyun 	ls_rsp->done(ls_rsp);
324*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
325*4882a593Smuzhiyun 			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
326*4882a593Smuzhiyun 			status, axchg->oxid);
327*4882a593Smuzhiyun 	kfree(axchg);
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun /**
331*4882a593Smuzhiyun  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
332*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
333*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
334*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
335*4882a593Smuzhiyun  *
336*4882a593Smuzhiyun  * The function is called from SLI ring event handler with no
337*4882a593Smuzhiyun  * lock held. This function is the completion handler for NVME LS commands.
338*4882a593Smuzhiyun  * The function updates any states and statistics, then calls the
339*4882a593Smuzhiyun  * generic completion handler to free resources.
340*4882a593Smuzhiyun  **/
341*4882a593Smuzhiyun static void
342*4882a593Smuzhiyun lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
343*4882a593Smuzhiyun 			  struct lpfc_wcqe_complete *wcqe)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
346*4882a593Smuzhiyun 	uint32_t status, result;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	if (!phba->targetport)
349*4882a593Smuzhiyun 		goto finish;
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
352*4882a593Smuzhiyun 	result = wcqe->parameter;
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
355*4882a593Smuzhiyun 	if (tgtp) {
356*4882a593Smuzhiyun 		if (status) {
357*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_ls_rsp_error);
358*4882a593Smuzhiyun 			if (result == IOERR_ABORT_REQUESTED)
359*4882a593Smuzhiyun 				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
360*4882a593Smuzhiyun 			if (bf_get(lpfc_wcqe_c_xb, wcqe))
361*4882a593Smuzhiyun 				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
362*4882a593Smuzhiyun 		} else {
363*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
364*4882a593Smuzhiyun 		}
365*4882a593Smuzhiyun 	}
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun finish:
368*4882a593Smuzhiyun 	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun /**
372*4882a593Smuzhiyun  * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
373*4882a593Smuzhiyun  * @phba: HBA buffer is associated with
374*4882a593Smuzhiyun  * @ctx_buf: ctx buffer whose exchange context is cleaned up and recycled
376*4882a593Smuzhiyun  *
377*4882a593Smuzhiyun  * Description: Frees the DMA buffer held by the context, normally by
378*4882a593Smuzhiyun  * reposting it to its associated RQ so it can be reused.
379*4882a593Smuzhiyun  *
380*4882a593Smuzhiyun  * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
381*4882a593Smuzhiyun  *
382*4882a593Smuzhiyun  * Returns: None
383*4882a593Smuzhiyun  **/
384*4882a593Smuzhiyun void
385*4882a593Smuzhiyun lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
388*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
389*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
390*4882a593Smuzhiyun 	struct fc_frame_header *fc_hdr;
391*4882a593Smuzhiyun 	struct rqb_dmabuf *nvmebuf;
392*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *infop;
393*4882a593Smuzhiyun 	uint32_t size, oxid, sid;
394*4882a593Smuzhiyun 	int cpu;
395*4882a593Smuzhiyun 	unsigned long iflag;
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	if (ctxp->state == LPFC_NVME_STE_FREE) {
398*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
399*4882a593Smuzhiyun 				"6411 NVMET free, already free IO x%x: %d %d\n",
400*4882a593Smuzhiyun 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
401*4882a593Smuzhiyun 	}
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	if (ctxp->rqb_buffer) {
404*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
405*4882a593Smuzhiyun 		nvmebuf = ctxp->rqb_buffer;
406*4882a593Smuzhiyun 		/* check if freed in another path whilst acquiring lock */
407*4882a593Smuzhiyun 		if (nvmebuf) {
408*4882a593Smuzhiyun 			ctxp->rqb_buffer = NULL;
409*4882a593Smuzhiyun 			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
410*4882a593Smuzhiyun 				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
411*4882a593Smuzhiyun 				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
412*4882a593Smuzhiyun 				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
413*4882a593Smuzhiyun 								    nvmebuf);
414*4882a593Smuzhiyun 			} else {
415*4882a593Smuzhiyun 				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
416*4882a593Smuzhiyun 				/* repost */
417*4882a593Smuzhiyun 				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
418*4882a593Smuzhiyun 			}
419*4882a593Smuzhiyun 		} else {
420*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
421*4882a593Smuzhiyun 		}
422*4882a593Smuzhiyun 	}
423*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_FREE;
424*4882a593Smuzhiyun 
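	/*
	 * If a received command is queued waiting for a free context,
	 * hand this context directly to that deferred command instead of
	 * returning it to the per-CPU free list.
	 */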
425*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
426*4882a593Smuzhiyun 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
427*4882a593Smuzhiyun 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
428*4882a593Smuzhiyun 				 nvmebuf, struct rqb_dmabuf,
429*4882a593Smuzhiyun 				 hbuf.list);
430*4882a593Smuzhiyun 		phba->sli4_hba.nvmet_io_wait_cnt--;
431*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
432*4882a593Smuzhiyun 				       iflag);
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
435*4882a593Smuzhiyun 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
436*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
437*4882a593Smuzhiyun 		size = nvmebuf->bytes_recv;
438*4882a593Smuzhiyun 		sid = sli4_sid_from_fc_hdr(fc_hdr);
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
441*4882a593Smuzhiyun 		ctxp->wqeq = NULL;
442*4882a593Smuzhiyun 		ctxp->offset = 0;
443*4882a593Smuzhiyun 		ctxp->phba = phba;
444*4882a593Smuzhiyun 		ctxp->size = size;
445*4882a593Smuzhiyun 		ctxp->oxid = oxid;
446*4882a593Smuzhiyun 		ctxp->sid = sid;
447*4882a593Smuzhiyun 		ctxp->state = LPFC_NVME_STE_RCV;
448*4882a593Smuzhiyun 		ctxp->entry_cnt = 1;
449*4882a593Smuzhiyun 		ctxp->flag = 0;
450*4882a593Smuzhiyun 		ctxp->ctxbuf = ctx_buf;
451*4882a593Smuzhiyun 		ctxp->rqb_buffer = (void *)nvmebuf;
452*4882a593Smuzhiyun 		spin_lock_init(&ctxp->ctxlock);
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
455*4882a593Smuzhiyun 		/* NOTE: isr time stamp is stale when context is re-assigned */
456*4882a593Smuzhiyun 		if (ctxp->ts_isr_cmd) {
457*4882a593Smuzhiyun 			ctxp->ts_cmd_nvme = 0;
458*4882a593Smuzhiyun 			ctxp->ts_nvme_data = 0;
459*4882a593Smuzhiyun 			ctxp->ts_data_wqput = 0;
460*4882a593Smuzhiyun 			ctxp->ts_isr_data = 0;
461*4882a593Smuzhiyun 			ctxp->ts_data_nvme = 0;
462*4882a593Smuzhiyun 			ctxp->ts_nvme_status = 0;
463*4882a593Smuzhiyun 			ctxp->ts_status_wqput = 0;
464*4882a593Smuzhiyun 			ctxp->ts_isr_status = 0;
465*4882a593Smuzhiyun 			ctxp->ts_status_nvme = 0;
466*4882a593Smuzhiyun 		}
467*4882a593Smuzhiyun #endif
468*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 		/* Indicate that a replacement buffer has been posted */
471*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
472*4882a593Smuzhiyun 		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
473*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
476*4882a593Smuzhiyun 			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
477*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
478*4882a593Smuzhiyun 					"6181 Unable to queue deferred work "
479*4882a593Smuzhiyun 					"for oxid x%x. "
480*4882a593Smuzhiyun 					"FCP Drop IO [x%x x%x x%x]\n",
481*4882a593Smuzhiyun 					ctxp->oxid,
482*4882a593Smuzhiyun 					atomic_read(&tgtp->rcv_fcp_cmd_in),
483*4882a593Smuzhiyun 					atomic_read(&tgtp->rcv_fcp_cmd_out),
484*4882a593Smuzhiyun 					atomic_read(&tgtp->xmt_fcp_release));
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 			spin_lock_irqsave(&ctxp->ctxlock, iflag);
487*4882a593Smuzhiyun 			lpfc_nvmet_defer_release(phba, ctxp);
488*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
489*4882a593Smuzhiyun 			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
490*4882a593Smuzhiyun 		}
491*4882a593Smuzhiyun 		return;
492*4882a593Smuzhiyun 	}
493*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	/*
496*4882a593Smuzhiyun 	 * Use the CPU context list, from the MRQ the IO was received on
497*4882a593Smuzhiyun 	 * (ctxp->idx), to save context structure.
498*4882a593Smuzhiyun 	 */
499*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
500*4882a593Smuzhiyun 	list_del_init(&ctxp->list);
501*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
502*4882a593Smuzhiyun 	cpu = raw_smp_processor_id();
503*4882a593Smuzhiyun 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
504*4882a593Smuzhiyun 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
505*4882a593Smuzhiyun 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
506*4882a593Smuzhiyun 	infop->nvmet_ctx_list_cnt++;
507*4882a593Smuzhiyun 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
508*4882a593Smuzhiyun #endif
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
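/*
 * lpfc_nvmet_ktime - fold one IO's timestamps into the ktime statistics
 *
 * Verifies that the timestamps captured for this exchange are monotonically
 * increasing, converts them into the segment deltas described below, and
 * updates the per-HBA min/max/total counters exposed through debugfs.
 */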
512*4882a593Smuzhiyun static void
513*4882a593Smuzhiyun lpfc_nvmet_ktime(struct lpfc_hba *phba,
514*4882a593Smuzhiyun 		 struct lpfc_async_xchg_ctx *ctxp)
515*4882a593Smuzhiyun {
516*4882a593Smuzhiyun 	uint64_t seg1, seg2, seg3, seg4, seg5;
517*4882a593Smuzhiyun 	uint64_t seg6, seg7, seg8, seg9, seg10;
518*4882a593Smuzhiyun 	uint64_t segsum;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
521*4882a593Smuzhiyun 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
522*4882a593Smuzhiyun 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
523*4882a593Smuzhiyun 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
524*4882a593Smuzhiyun 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
525*4882a593Smuzhiyun 		return;
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
528*4882a593Smuzhiyun 		return;
529*4882a593Smuzhiyun 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
530*4882a593Smuzhiyun 		return;
531*4882a593Smuzhiyun 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
532*4882a593Smuzhiyun 		return;
533*4882a593Smuzhiyun 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
534*4882a593Smuzhiyun 		return;
535*4882a593Smuzhiyun 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
536*4882a593Smuzhiyun 		return;
537*4882a593Smuzhiyun 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
538*4882a593Smuzhiyun 		return;
539*4882a593Smuzhiyun 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
540*4882a593Smuzhiyun 		return;
541*4882a593Smuzhiyun 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
542*4882a593Smuzhiyun 		return;
543*4882a593Smuzhiyun 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
544*4882a593Smuzhiyun 		return;
545*4882a593Smuzhiyun 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
546*4882a593Smuzhiyun 		return;
547*4882a593Smuzhiyun 	/*
548*4882a593Smuzhiyun 	 * Segment 1 - Time from FCP command received by MSI-X ISR
549*4882a593Smuzhiyun 	 * to FCP command is passed to NVME Layer.
550*4882a593Smuzhiyun 	 * Segment 2 - Time from FCP command payload handed
551*4882a593Smuzhiyun 	 * off to NVME Layer to Driver receives a Command op
552*4882a593Smuzhiyun 	 * from NVME Layer.
553*4882a593Smuzhiyun 	 * Segment 3 - Time from Driver receives a Command op
554*4882a593Smuzhiyun 	 * from NVME Layer to Command is put on WQ.
555*4882a593Smuzhiyun 	 * Segment 4 - Time from Driver WQ put is done
556*4882a593Smuzhiyun 	 * to MSI-X ISR for Command cmpl.
557*4882a593Smuzhiyun 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
558*4882a593Smuzhiyun 	 * Command cmpl is passed to NVME Layer.
559*4882a593Smuzhiyun 	 * Segment 6 - Time from Command cmpl is passed to NVME
560*4882a593Smuzhiyun 	 * Layer to Driver receives a RSP op from NVME Layer.
561*4882a593Smuzhiyun 	 * Segment 7 - Time from Driver receives a RSP op from
562*4882a593Smuzhiyun 	 * NVME Layer to WQ put is done on TRSP FCP Status.
563*4882a593Smuzhiyun 	 * Segment 8 - Time from Driver WQ put is done on TRSP
564*4882a593Smuzhiyun 	 * FCP Status to MSI-X ISR for TRSP cmpl.
565*4882a593Smuzhiyun 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
566*4882a593Smuzhiyun 	 * TRSP cmpl is passed to NVME Layer.
567*4882a593Smuzhiyun 	 * Segment 10 - Time from FCP command received by
568*4882a593Smuzhiyun 	 * MSI-X ISR to command is completed on wire.
569*4882a593Smuzhiyun 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
570*4882a593Smuzhiyun 	 * (Segments 1 thru 4) for READDATA_RSP
571*4882a593Smuzhiyun 	 */
572*4882a593Smuzhiyun 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
573*4882a593Smuzhiyun 	segsum = seg1;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
576*4882a593Smuzhiyun 	if (segsum > seg2)
577*4882a593Smuzhiyun 		return;
578*4882a593Smuzhiyun 	seg2 -= segsum;
579*4882a593Smuzhiyun 	segsum += seg2;
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
582*4882a593Smuzhiyun 	if (segsum > seg3)
583*4882a593Smuzhiyun 		return;
584*4882a593Smuzhiyun 	seg3 -= segsum;
585*4882a593Smuzhiyun 	segsum += seg3;
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
588*4882a593Smuzhiyun 	if (segsum > seg4)
589*4882a593Smuzhiyun 		return;
590*4882a593Smuzhiyun 	seg4 -= segsum;
591*4882a593Smuzhiyun 	segsum += seg4;
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
594*4882a593Smuzhiyun 	if (segsum > seg5)
595*4882a593Smuzhiyun 		return;
596*4882a593Smuzhiyun 	seg5 -= segsum;
597*4882a593Smuzhiyun 	segsum += seg5;
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 	/* For auto rsp commands seg6 thru seg10 will be 0 */
601*4882a593Smuzhiyun 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
602*4882a593Smuzhiyun 		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
603*4882a593Smuzhiyun 		if (segsum > seg6)
604*4882a593Smuzhiyun 			return;
605*4882a593Smuzhiyun 		seg6 -= segsum;
606*4882a593Smuzhiyun 		segsum += seg6;
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
609*4882a593Smuzhiyun 		if (segsum > seg7)
610*4882a593Smuzhiyun 			return;
611*4882a593Smuzhiyun 		seg7 -= segsum;
612*4882a593Smuzhiyun 		segsum += seg7;
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
615*4882a593Smuzhiyun 		if (segsum > seg8)
616*4882a593Smuzhiyun 			return;
617*4882a593Smuzhiyun 		seg8 -= segsum;
618*4882a593Smuzhiyun 		segsum += seg8;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
621*4882a593Smuzhiyun 		if (segsum > seg9)
622*4882a593Smuzhiyun 			return;
623*4882a593Smuzhiyun 		seg9 -= segsum;
624*4882a593Smuzhiyun 		segsum += seg9;
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
627*4882a593Smuzhiyun 			return;
628*4882a593Smuzhiyun 		seg10 = (ctxp->ts_isr_status -
629*4882a593Smuzhiyun 			ctxp->ts_isr_cmd);
630*4882a593Smuzhiyun 	} else {
631*4882a593Smuzhiyun 		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
632*4882a593Smuzhiyun 			return;
633*4882a593Smuzhiyun 		seg6 =  0;
634*4882a593Smuzhiyun 		seg7 =  0;
635*4882a593Smuzhiyun 		seg8 =  0;
636*4882a593Smuzhiyun 		seg9 =  0;
637*4882a593Smuzhiyun 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
638*4882a593Smuzhiyun 	}
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 	phba->ktime_seg1_total += seg1;
641*4882a593Smuzhiyun 	if (seg1 < phba->ktime_seg1_min)
642*4882a593Smuzhiyun 		phba->ktime_seg1_min = seg1;
643*4882a593Smuzhiyun 	else if (seg1 > phba->ktime_seg1_max)
644*4882a593Smuzhiyun 		phba->ktime_seg1_max = seg1;
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 	phba->ktime_seg2_total += seg2;
647*4882a593Smuzhiyun 	if (seg2 < phba->ktime_seg2_min)
648*4882a593Smuzhiyun 		phba->ktime_seg2_min = seg2;
649*4882a593Smuzhiyun 	else if (seg2 > phba->ktime_seg2_max)
650*4882a593Smuzhiyun 		phba->ktime_seg2_max = seg2;
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	phba->ktime_seg3_total += seg3;
653*4882a593Smuzhiyun 	if (seg3 < phba->ktime_seg3_min)
654*4882a593Smuzhiyun 		phba->ktime_seg3_min = seg3;
655*4882a593Smuzhiyun 	else if (seg3 > phba->ktime_seg3_max)
656*4882a593Smuzhiyun 		phba->ktime_seg3_max = seg3;
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 	phba->ktime_seg4_total += seg4;
659*4882a593Smuzhiyun 	if (seg4 < phba->ktime_seg4_min)
660*4882a593Smuzhiyun 		phba->ktime_seg4_min = seg4;
661*4882a593Smuzhiyun 	else if (seg4 > phba->ktime_seg4_max)
662*4882a593Smuzhiyun 		phba->ktime_seg4_max = seg4;
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	phba->ktime_seg5_total += seg5;
665*4882a593Smuzhiyun 	if (seg5 < phba->ktime_seg5_min)
666*4882a593Smuzhiyun 		phba->ktime_seg5_min = seg5;
667*4882a593Smuzhiyun 	else if (seg5 > phba->ktime_seg5_max)
668*4882a593Smuzhiyun 		phba->ktime_seg5_max = seg5;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	phba->ktime_data_samples++;
671*4882a593Smuzhiyun 	if (!seg6)
672*4882a593Smuzhiyun 		goto out;
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 	phba->ktime_seg6_total += seg6;
675*4882a593Smuzhiyun 	if (seg6 < phba->ktime_seg6_min)
676*4882a593Smuzhiyun 		phba->ktime_seg6_min = seg6;
677*4882a593Smuzhiyun 	else if (seg6 > phba->ktime_seg6_max)
678*4882a593Smuzhiyun 		phba->ktime_seg6_max = seg6;
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	phba->ktime_seg7_total += seg7;
681*4882a593Smuzhiyun 	if (seg7 < phba->ktime_seg7_min)
682*4882a593Smuzhiyun 		phba->ktime_seg7_min = seg7;
683*4882a593Smuzhiyun 	else if (seg7 > phba->ktime_seg7_max)
684*4882a593Smuzhiyun 		phba->ktime_seg7_max = seg7;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	phba->ktime_seg8_total += seg8;
687*4882a593Smuzhiyun 	if (seg8 < phba->ktime_seg8_min)
688*4882a593Smuzhiyun 		phba->ktime_seg8_min = seg8;
689*4882a593Smuzhiyun 	else if (seg8 > phba->ktime_seg8_max)
690*4882a593Smuzhiyun 		phba->ktime_seg8_max = seg8;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	phba->ktime_seg9_total += seg9;
693*4882a593Smuzhiyun 	if (seg9 < phba->ktime_seg9_min)
694*4882a593Smuzhiyun 		phba->ktime_seg9_min = seg9;
695*4882a593Smuzhiyun 	else if (seg9 > phba->ktime_seg9_max)
696*4882a593Smuzhiyun 		phba->ktime_seg9_max = seg9;
697*4882a593Smuzhiyun out:
698*4882a593Smuzhiyun 	phba->ktime_seg10_total += seg10;
699*4882a593Smuzhiyun 	if (seg10 < phba->ktime_seg10_min)
700*4882a593Smuzhiyun 		phba->ktime_seg10_min = seg10;
701*4882a593Smuzhiyun 	else if (seg10 > phba->ktime_seg10_max)
702*4882a593Smuzhiyun 		phba->ktime_seg10_max = seg10;
703*4882a593Smuzhiyun 	phba->ktime_status_samples++;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun #endif
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun /**
708*4882a593Smuzhiyun  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
709*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
710*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
711*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
712*4882a593Smuzhiyun  *
713*4882a593Smuzhiyun  * The function is called from SLI ring event handler with no
714*4882a593Smuzhiyun  * lock held. This function is the completion handler for NVME FCP commands.
715*4882a593Smuzhiyun  * The function frees memory resources used for the NVME commands.
716*4882a593Smuzhiyun  **/
717*4882a593Smuzhiyun static void
718*4882a593Smuzhiyun lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
719*4882a593Smuzhiyun 			  struct lpfc_wcqe_complete *wcqe)
720*4882a593Smuzhiyun {
721*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
722*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *rsp;
723*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
724*4882a593Smuzhiyun 	uint32_t status, result, op, start_clean, logerr;
725*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726*4882a593Smuzhiyun 	int id;
727*4882a593Smuzhiyun #endif
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	ctxp = cmdwqe->context2;
730*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_IO_INP;
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun 	rsp = &ctxp->hdlrctx.fcp_req;
733*4882a593Smuzhiyun 	op = rsp->op;
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	status = bf_get(lpfc_wcqe_c_status, wcqe);
736*4882a593Smuzhiyun 	result = wcqe->parameter;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	if (phba->targetport)
739*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
740*4882a593Smuzhiyun 	else
741*4882a593Smuzhiyun 		tgtp = NULL;
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
744*4882a593Smuzhiyun 			 ctxp->oxid, op, status);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	if (status) {
747*4882a593Smuzhiyun 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
748*4882a593Smuzhiyun 		rsp->transferred_length = 0;
749*4882a593Smuzhiyun 		if (tgtp) {
750*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
751*4882a593Smuzhiyun 			if (result == IOERR_ABORT_REQUESTED)
752*4882a593Smuzhiyun 				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
753*4882a593Smuzhiyun 		}
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 		logerr = LOG_NVME_IOERR;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 		/* pick up SLI4 exchange busy condition */
758*4882a593Smuzhiyun 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
759*4882a593Smuzhiyun 			ctxp->flag |= LPFC_NVME_XBUSY;
760*4882a593Smuzhiyun 			logerr |= LOG_NVME_ABTS;
761*4882a593Smuzhiyun 			if (tgtp)
762*4882a593Smuzhiyun 				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 		} else {
765*4882a593Smuzhiyun 			ctxp->flag &= ~LPFC_NVME_XBUSY;
766*4882a593Smuzhiyun 		}
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, logerr,
769*4882a593Smuzhiyun 				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
770*4882a593Smuzhiyun 				"XBUSY:x%x\n",
771*4882a593Smuzhiyun 				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
772*4882a593Smuzhiyun 				status, result, ctxp->flag);
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	} else {
775*4882a593Smuzhiyun 		rsp->fcp_error = NVME_SC_SUCCESS;
776*4882a593Smuzhiyun 		if (op == NVMET_FCOP_RSP)
777*4882a593Smuzhiyun 			rsp->transferred_length = rsp->rsplen;
778*4882a593Smuzhiyun 		else
779*4882a593Smuzhiyun 			rsp->transferred_length = rsp->transfer_length;
780*4882a593Smuzhiyun 		if (tgtp)
781*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
782*4882a593Smuzhiyun 	}
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	if ((op == NVMET_FCOP_READDATA_RSP) ||
785*4882a593Smuzhiyun 	    (op == NVMET_FCOP_RSP)) {
786*4882a593Smuzhiyun 		/* Sanity check */
787*4882a593Smuzhiyun 		ctxp->state = LPFC_NVME_STE_DONE;
788*4882a593Smuzhiyun 		ctxp->entry_cnt++;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
791*4882a593Smuzhiyun 		if (ctxp->ts_cmd_nvme) {
792*4882a593Smuzhiyun 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
793*4882a593Smuzhiyun 				ctxp->ts_isr_data =
794*4882a593Smuzhiyun 					cmdwqe->isr_timestamp;
795*4882a593Smuzhiyun 				ctxp->ts_data_nvme =
796*4882a593Smuzhiyun 					ktime_get_ns();
797*4882a593Smuzhiyun 				ctxp->ts_nvme_status =
798*4882a593Smuzhiyun 					ctxp->ts_data_nvme;
799*4882a593Smuzhiyun 				ctxp->ts_status_wqput =
800*4882a593Smuzhiyun 					ctxp->ts_data_nvme;
801*4882a593Smuzhiyun 				ctxp->ts_isr_status =
802*4882a593Smuzhiyun 					ctxp->ts_data_nvme;
803*4882a593Smuzhiyun 				ctxp->ts_status_nvme =
804*4882a593Smuzhiyun 					ctxp->ts_data_nvme;
805*4882a593Smuzhiyun 			} else {
806*4882a593Smuzhiyun 				ctxp->ts_isr_status =
807*4882a593Smuzhiyun 					cmdwqe->isr_timestamp;
808*4882a593Smuzhiyun 				ctxp->ts_status_nvme =
809*4882a593Smuzhiyun 					ktime_get_ns();
810*4882a593Smuzhiyun 			}
811*4882a593Smuzhiyun 		}
812*4882a593Smuzhiyun #endif
813*4882a593Smuzhiyun 		rsp->done(rsp);
814*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
815*4882a593Smuzhiyun 		if (ctxp->ts_cmd_nvme)
816*4882a593Smuzhiyun 			lpfc_nvmet_ktime(phba, ctxp);
817*4882a593Smuzhiyun #endif
818*4882a593Smuzhiyun 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
819*4882a593Smuzhiyun 	} else {
820*4882a593Smuzhiyun 		ctxp->entry_cnt++;
821*4882a593Smuzhiyun 		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
822*4882a593Smuzhiyun 		memset(((char *)cmdwqe) + start_clean, 0,
823*4882a593Smuzhiyun 		       (sizeof(struct lpfc_iocbq) - start_clean));
824*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
825*4882a593Smuzhiyun 		if (ctxp->ts_cmd_nvme) {
826*4882a593Smuzhiyun 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
827*4882a593Smuzhiyun 			ctxp->ts_data_nvme = ktime_get_ns();
828*4882a593Smuzhiyun 		}
829*4882a593Smuzhiyun #endif
830*4882a593Smuzhiyun 		rsp->done(rsp);
831*4882a593Smuzhiyun 	}
832*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
833*4882a593Smuzhiyun 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
834*4882a593Smuzhiyun 		id = raw_smp_processor_id();
835*4882a593Smuzhiyun 		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
836*4882a593Smuzhiyun 		if (ctxp->cpu != id)
837*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
838*4882a593Smuzhiyun 					"6704 CPU Check cmdcmpl: "
839*4882a593Smuzhiyun 					"cpu %d expect %d\n",
840*4882a593Smuzhiyun 					id, ctxp->cpu);
841*4882a593Smuzhiyun 	}
842*4882a593Smuzhiyun #endif
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun /**
846*4882a593Smuzhiyun  * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
847*4882a593Smuzhiyun  *         an NVME LS rsp for a prior NVME LS request that was received.
848*4882a593Smuzhiyun  * @axchg: pointer to exchange context for the NVME LS request the response
849*4882a593Smuzhiyun  *         is for.
850*4882a593Smuzhiyun  * @ls_rsp: pointer to the transport LS RSP that is to be sent
851*4882a593Smuzhiyun  * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
852*4882a593Smuzhiyun  *
853*4882a593Smuzhiyun  * This routine is used to format and send a WQE to transmit a NVME LS
854*4882a593Smuzhiyun  * Response.  The response is for a prior NVME LS request that was
855*4882a593Smuzhiyun  * received and posted to the transport.
856*4882a593Smuzhiyun  *
857*4882a593Smuzhiyun  * Returns:
858*4882a593Smuzhiyun  *  0 : if response successfully transmitted
859*4882a593Smuzhiyun  *  non-zero : if response failed to transmit, of the form -Exxx.
860*4882a593Smuzhiyun  **/
861*4882a593Smuzhiyun int
862*4882a593Smuzhiyun __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
863*4882a593Smuzhiyun 			struct nvmefc_ls_rsp *ls_rsp,
864*4882a593Smuzhiyun 			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
865*4882a593Smuzhiyun 				struct lpfc_iocbq *cmdwqe,
866*4882a593Smuzhiyun 				struct lpfc_wcqe_complete *wcqe))
867*4882a593Smuzhiyun {
868*4882a593Smuzhiyun 	struct lpfc_hba *phba = axchg->phba;
869*4882a593Smuzhiyun 	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
870*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqeq;
871*4882a593Smuzhiyun 	struct lpfc_dmabuf dmabuf;
872*4882a593Smuzhiyun 	struct ulp_bde64 bpl;
873*4882a593Smuzhiyun 	int rc;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	if (phba->pport->load_flag & FC_UNLOADING)
876*4882a593Smuzhiyun 		return -ENODEV;
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
879*4882a593Smuzhiyun 			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
882*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
883*4882a593Smuzhiyun 				"6412 NVMEx LS rsp state mismatch "
884*4882a593Smuzhiyun 				"oxid x%x: %d %d\n",
885*4882a593Smuzhiyun 				axchg->oxid, axchg->state, axchg->entry_cnt);
886*4882a593Smuzhiyun 		return -EALREADY;
887*4882a593Smuzhiyun 	}
888*4882a593Smuzhiyun 	axchg->state = LPFC_NVME_STE_LS_RSP;
889*4882a593Smuzhiyun 	axchg->entry_cnt++;
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
892*4882a593Smuzhiyun 					 ls_rsp->rsplen);
893*4882a593Smuzhiyun 	if (nvmewqeq == NULL) {
894*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895*4882a593Smuzhiyun 				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
896*4882a593Smuzhiyun 				axchg->oxid);
897*4882a593Smuzhiyun 		rc = -ENOMEM;
898*4882a593Smuzhiyun 		goto out_free_buf;
899*4882a593Smuzhiyun 	}
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	/* Save numBdes for bpl2sgl */
902*4882a593Smuzhiyun 	nvmewqeq->rsvd2 = 1;
903*4882a593Smuzhiyun 	nvmewqeq->hba_wqidx = 0;
904*4882a593Smuzhiyun 	nvmewqeq->context3 = &dmabuf;
905*4882a593Smuzhiyun 	dmabuf.virt = &bpl;
906*4882a593Smuzhiyun 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
907*4882a593Smuzhiyun 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
908*4882a593Smuzhiyun 	bpl.tus.f.bdeSize = ls_rsp->rsplen;
909*4882a593Smuzhiyun 	bpl.tus.f.bdeFlags = 0;
910*4882a593Smuzhiyun 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
911*4882a593Smuzhiyun 	/*
912*4882a593Smuzhiyun 	 * Note: although we're using stack space for the dmabuf, the
913*4882a593Smuzhiyun 	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
914*4882a593Smuzhiyun 	 * be referenced after it returns back to this routine.
915*4882a593Smuzhiyun 	 */
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun 	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
918*4882a593Smuzhiyun 	nvmewqeq->iocb_cmpl = NULL;
919*4882a593Smuzhiyun 	nvmewqeq->context2 = axchg;
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
922*4882a593Smuzhiyun 			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* clear to be sure there's no reference */
927*4882a593Smuzhiyun 	nvmewqeq->context3 = NULL;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	if (rc == WQE_SUCCESS) {
930*4882a593Smuzhiyun 		/*
931*4882a593Smuzhiyun 		 * Okay to repost buffer here, but wait till cmpl
932*4882a593Smuzhiyun 		 * before freeing ctxp and iocbq.
933*4882a593Smuzhiyun 		 */
934*4882a593Smuzhiyun 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
935*4882a593Smuzhiyun 		return 0;
936*4882a593Smuzhiyun 	}
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
939*4882a593Smuzhiyun 			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
940*4882a593Smuzhiyun 			axchg->oxid, rc);
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	rc = -ENXIO;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	lpfc_nlp_put(nvmewqeq->context1);
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun out_free_buf:
947*4882a593Smuzhiyun 	/* Give back resources */
948*4882a593Smuzhiyun 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	/*
951*4882a593Smuzhiyun 	 * As transport doesn't track completions of responses, if the rsp
952*4882a593Smuzhiyun 	 * fails to send, the transport will effectively ignore the rsp
953*4882a593Smuzhiyun 	 * and consider the LS done. However, the driver has an active
954*4882a593Smuzhiyun 	 * exchange open for the LS - so be sure to abort the exchange
955*4882a593Smuzhiyun 	 * if the response isn't sent.
956*4882a593Smuzhiyun 	 */
957*4882a593Smuzhiyun 	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
958*4882a593Smuzhiyun 	return rc;
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun /**
962*4882a593Smuzhiyun  * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
963*4882a593Smuzhiyun  * @tgtport: pointer to target port that NVME LS is to be transmit from.
964*4882a593Smuzhiyun  * @ls_rsp: pointer to the transport LS RSP that is to be sent
965*4882a593Smuzhiyun  *
966*4882a593Smuzhiyun  * Driver registers this routine to transmit responses for received NVME
967*4882a593Smuzhiyun  * LS requests.
968*4882a593Smuzhiyun  *
969*4882a593Smuzhiyun  * This routine is used to format and send a WQE to transmit a NVME LS
970*4882a593Smuzhiyun  * Response. The ls_rsp is used to reverse-map the LS to the original
971*4882a593Smuzhiyun  * NVME LS request sequence, which provides addressing information for
972*4882a593Smuzhiyun  * the remote port the LS to be sent to, as well as the exchange id
973*4882a593Smuzhiyun  * that is the LS is bound to.
974*4882a593Smuzhiyun  *
975*4882a593Smuzhiyun  * Returns:
976*4882a593Smuzhiyun  *  0 : if response successfully transmitted
977*4882a593Smuzhiyun  *  non-zero : if response failed to transmit, of the form -Exxx.
978*4882a593Smuzhiyun  **/
979*4882a593Smuzhiyun static int
980*4882a593Smuzhiyun lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
981*4882a593Smuzhiyun 		      struct nvmefc_ls_rsp *ls_rsp)
982*4882a593Smuzhiyun {
983*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *axchg =
984*4882a593Smuzhiyun 		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
985*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
986*4882a593Smuzhiyun 	int rc;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	if (axchg->phba->pport->load_flag & FC_UNLOADING)
989*4882a593Smuzhiyun 		return -ENODEV;
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun 	if (rc) {
994*4882a593Smuzhiyun 		atomic_inc(&nvmep->xmt_ls_drop);
995*4882a593Smuzhiyun 		/*
996*4882a593Smuzhiyun 		 * unless the failure is due to having already sent
997*4882a593Smuzhiyun 		 * the response, an abort will be generated for the
998*4882a593Smuzhiyun 		 * exchange if the rsp can't be sent.
999*4882a593Smuzhiyun 		 */
1000*4882a593Smuzhiyun 		if (rc != -EALREADY)
1001*4882a593Smuzhiyun 			atomic_inc(&nvmep->xmt_ls_abort);
1002*4882a593Smuzhiyun 		return rc;
1003*4882a593Smuzhiyun 	}
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	atomic_inc(&nvmep->xmt_ls_rsp);
1006*4882a593Smuzhiyun 	return 0;
1007*4882a593Smuzhiyun }
1008*4882a593Smuzhiyun 
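/*
 * lpfc_nvmet_xmt_fcp_op - transmit an FCP target operation (data or status)
 *
 * nvmet_fc entry point for TSEND/TRECEIVE/TRSP operations on a received
 * NVME command.  Builds the FCP WQE and posts it to the hardware queue
 * chosen by rsp->hwqid; if the WQ is full the WQE is queued on the
 * wqfull_list to be reissued from the WQE-release CQE path.
 * Returns 0 on success or a negative error code.
 */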
1009*4882a593Smuzhiyun static int
1010*4882a593Smuzhiyun lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1011*4882a593Smuzhiyun 		      struct nvmefc_tgt_fcp_req *rsp)
1012*4882a593Smuzhiyun {
1013*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1014*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp =
1015*4882a593Smuzhiyun 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1016*4882a593Smuzhiyun 	struct lpfc_hba *phba = ctxp->phba;
1017*4882a593Smuzhiyun 	struct lpfc_queue *wq;
1018*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqeq;
1019*4882a593Smuzhiyun 	struct lpfc_sli_ring *pring;
1020*4882a593Smuzhiyun 	unsigned long iflags;
1021*4882a593Smuzhiyun 	int rc;
1022*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1023*4882a593Smuzhiyun 	int id;
1024*4882a593Smuzhiyun #endif
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	if (phba->pport->load_flag & FC_UNLOADING) {
1027*4882a593Smuzhiyun 		rc = -ENODEV;
1028*4882a593Smuzhiyun 		goto aerr;
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1032*4882a593Smuzhiyun 	if (ctxp->ts_cmd_nvme) {
1033*4882a593Smuzhiyun 		if (rsp->op == NVMET_FCOP_RSP)
1034*4882a593Smuzhiyun 			ctxp->ts_nvme_status = ktime_get_ns();
1035*4882a593Smuzhiyun 		else
1036*4882a593Smuzhiyun 			ctxp->ts_nvme_data = ktime_get_ns();
1037*4882a593Smuzhiyun 	}
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	/* Setup the hdw queue if not already set */
1040*4882a593Smuzhiyun 	if (!ctxp->hdwq)
1041*4882a593Smuzhiyun 		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1044*4882a593Smuzhiyun 		id = raw_smp_processor_id();
1045*4882a593Smuzhiyun 		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1046*4882a593Smuzhiyun 		if (rsp->hwqid != id)
1047*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1048*4882a593Smuzhiyun 					"6705 CPU Check OP: "
1049*4882a593Smuzhiyun 					"cpu %d expect %d\n",
1050*4882a593Smuzhiyun 					id, rsp->hwqid);
1051*4882a593Smuzhiyun 		ctxp->cpu = id; /* Setup cpu for cmpl check */
1052*4882a593Smuzhiyun 	}
1053*4882a593Smuzhiyun #endif
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	/* Sanity check */
1056*4882a593Smuzhiyun 	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1057*4882a593Smuzhiyun 	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
1058*4882a593Smuzhiyun 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1059*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1060*4882a593Smuzhiyun 				"6102 IO oxid x%x aborted\n",
1061*4882a593Smuzhiyun 				ctxp->oxid);
1062*4882a593Smuzhiyun 		rc = -ENXIO;
1063*4882a593Smuzhiyun 		goto aerr;
1064*4882a593Smuzhiyun 	}
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1067*4882a593Smuzhiyun 	if (nvmewqeq == NULL) {
1068*4882a593Smuzhiyun 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1069*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1070*4882a593Smuzhiyun 				"6152 FCP Drop IO x%x: Prep\n",
1071*4882a593Smuzhiyun 				ctxp->oxid);
1072*4882a593Smuzhiyun 		rc = -ENXIO;
1073*4882a593Smuzhiyun 		goto aerr;
1074*4882a593Smuzhiyun 	}
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1077*4882a593Smuzhiyun 	nvmewqeq->iocb_cmpl = NULL;
1078*4882a593Smuzhiyun 	nvmewqeq->context2 = ctxp;
1079*4882a593Smuzhiyun 	nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
1080*4882a593Smuzhiyun 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1083*4882a593Smuzhiyun 			 ctxp->oxid, rsp->op, rsp->rsplen);
1084*4882a593Smuzhiyun 
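	/* Mark the exchange as having an IO in flight, then post the WQE to
	 * the hardware queue the transport selected via rsp->hwqid.
	 */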
1085*4882a593Smuzhiyun 	ctxp->flag |= LPFC_NVME_IO_INP;
1086*4882a593Smuzhiyun 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1087*4882a593Smuzhiyun 	if (rc == WQE_SUCCESS) {
1088*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1089*4882a593Smuzhiyun 		if (!ctxp->ts_cmd_nvme)
1090*4882a593Smuzhiyun 			return 0;
1091*4882a593Smuzhiyun 		if (rsp->op == NVMET_FCOP_RSP)
1092*4882a593Smuzhiyun 			ctxp->ts_status_wqput = ktime_get_ns();
1093*4882a593Smuzhiyun 		else
1094*4882a593Smuzhiyun 			ctxp->ts_data_wqput = ktime_get_ns();
1095*4882a593Smuzhiyun #endif
1096*4882a593Smuzhiyun 		return 0;
1097*4882a593Smuzhiyun 	}
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	if (rc == -EBUSY) {
1100*4882a593Smuzhiyun 		/*
1101*4882a593Smuzhiyun 		 * WQ was full, so queue nvmewqeq to be sent after
1102*4882a593Smuzhiyun 		 * WQE release CQE
1103*4882a593Smuzhiyun 		 */
1104*4882a593Smuzhiyun 		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1105*4882a593Smuzhiyun 		wq = ctxp->hdwq->io_wq;
1106*4882a593Smuzhiyun 		pring = wq->pring;
1107*4882a593Smuzhiyun 		spin_lock_irqsave(&pring->ring_lock, iflags);
1108*4882a593Smuzhiyun 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1109*4882a593Smuzhiyun 		wq->q_flag |= HBA_NVMET_WQFULL;
1110*4882a593Smuzhiyun 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
1111*4882a593Smuzhiyun 		atomic_inc(&lpfc_nvmep->defer_wqfull);
1112*4882a593Smuzhiyun 		return 0;
1113*4882a593Smuzhiyun 	}
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	/* Give back resources */
1116*4882a593Smuzhiyun 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1117*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1118*4882a593Smuzhiyun 			"6153 FCP Drop IO x%x: Issue: %d\n",
1119*4882a593Smuzhiyun 			ctxp->oxid, rc);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	ctxp->wqeq->hba_wqidx = 0;
1122*4882a593Smuzhiyun 	nvmewqeq->context2 = NULL;
1123*4882a593Smuzhiyun 	nvmewqeq->context3 = NULL;
1124*4882a593Smuzhiyun 	rc = -EBUSY;
1125*4882a593Smuzhiyun aerr:
1126*4882a593Smuzhiyun 	return rc;
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun static void
1130*4882a593Smuzhiyun lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1131*4882a593Smuzhiyun {
1132*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tport = targetport->private;
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	/* release any threads waiting for the unreg to complete */
1135*4882a593Smuzhiyun 	if (tport->phba->targetport)
1136*4882a593Smuzhiyun 		complete(tport->tport_unreg_cmp);
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun static void
1140*4882a593Smuzhiyun lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1141*4882a593Smuzhiyun 			 struct nvmefc_tgt_fcp_req *req)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1144*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp =
1145*4882a593Smuzhiyun 		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1146*4882a593Smuzhiyun 	struct lpfc_hba *phba = ctxp->phba;
1147*4882a593Smuzhiyun 	struct lpfc_queue *wq;
1148*4882a593Smuzhiyun 	unsigned long flags;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	if (phba->pport->load_flag & FC_UNLOADING)
1151*4882a593Smuzhiyun 		return;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	if (!ctxp->hdwq)
1154*4882a593Smuzhiyun 		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1157*4882a593Smuzhiyun 			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1158*4882a593Smuzhiyun 			ctxp->oxid, ctxp->flag, ctxp->state);
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1161*4882a593Smuzhiyun 			 ctxp->oxid, ctxp->flag, ctxp->state);
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	/* Since iaab/iaar are NOT set, we need to check
1168*4882a593Smuzhiyun 	 * if the firmware is in the process of aborting this IO
1169*4882a593Smuzhiyun 	 */
1170*4882a593Smuzhiyun 	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1171*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1172*4882a593Smuzhiyun 		return;
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 	ctxp->flag |= LPFC_NVME_ABORT_OP;
1175*4882a593Smuzhiyun 
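	/* If the IO was deferred because the WQ was full it never reached
	 * the hardware; abort the exchange and flush the deferred WQE off
	 * the wqfull_list so its completion is simulated.
	 */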
1176*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1177*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1178*4882a593Smuzhiyun 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1179*4882a593Smuzhiyun 						 ctxp->oxid);
1180*4882a593Smuzhiyun 		wq = ctxp->hdwq->io_wq;
1181*4882a593Smuzhiyun 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1182*4882a593Smuzhiyun 		return;
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	/* A state of LPFC_NVME_STE_RCV means we have just received
1187*4882a593Smuzhiyun 	 * the NVME command and have not started processing it.
1188*4882a593Smuzhiyun 	 * (by issuing any IO WQEs on this exchange yet)
1189*4882a593Smuzhiyun 	 */
1190*4882a593Smuzhiyun 	if (ctxp->state == LPFC_NVME_STE_RCV)
1191*4882a593Smuzhiyun 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1192*4882a593Smuzhiyun 						 ctxp->oxid);
1193*4882a593Smuzhiyun 	else
1194*4882a593Smuzhiyun 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1195*4882a593Smuzhiyun 					       ctxp->oxid);
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun static void
1199*4882a593Smuzhiyun lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1200*4882a593Smuzhiyun 			   struct nvmefc_tgt_fcp_req *rsp)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1203*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp =
1204*4882a593Smuzhiyun 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1205*4882a593Smuzhiyun 	struct lpfc_hba *phba = ctxp->phba;
1206*4882a593Smuzhiyun 	unsigned long flags;
1207*4882a593Smuzhiyun 	bool aborting = false;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1210*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_XBUSY)
1211*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1212*4882a593Smuzhiyun 				"6027 NVMET release with XBUSY flag x%x"
1213*4882a593Smuzhiyun 				" oxid x%x\n",
1214*4882a593Smuzhiyun 				ctxp->flag, ctxp->oxid);
1215*4882a593Smuzhiyun 	else if (ctxp->state != LPFC_NVME_STE_DONE &&
1216*4882a593Smuzhiyun 		 ctxp->state != LPFC_NVME_STE_ABORT)
1217*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1218*4882a593Smuzhiyun 				"6413 NVMET release bad state %d %d oxid x%x\n",
1219*4882a593Smuzhiyun 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1222*4882a593Smuzhiyun 	    (ctxp->flag & LPFC_NVME_XBUSY)) {
1223*4882a593Smuzhiyun 		aborting = true;
1224*4882a593Smuzhiyun 		/* let the abort path do the real release */
1225*4882a593Smuzhiyun 		lpfc_nvmet_defer_release(phba, ctxp);
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1230*4882a593Smuzhiyun 			 ctxp->state, aborting);
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1233*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1234*4882a593Smuzhiyun 
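	/* If an abort is still outstanding, the context buffer is returned
	 * to the free pool later by the abort/XRI-aborted completion path.
	 */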
1235*4882a593Smuzhiyun 	if (aborting)
1236*4882a593Smuzhiyun 		return;
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun static void
1242*4882a593Smuzhiyun lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1243*4882a593Smuzhiyun 		     struct nvmefc_tgt_fcp_req *rsp)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
1246*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp =
1247*4882a593Smuzhiyun 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1248*4882a593Smuzhiyun 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1249*4882a593Smuzhiyun 	struct lpfc_hba *phba = ctxp->phba;
1250*4882a593Smuzhiyun 	unsigned long iflag;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1254*4882a593Smuzhiyun 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	if (!nvmebuf) {
1257*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1258*4882a593Smuzhiyun 				"6425 Defer rcv: no buffer oxid x%x: "
1259*4882a593Smuzhiyun 				"flg %x ste %x\n",
1260*4882a593Smuzhiyun 				ctxp->oxid, ctxp->flag, ctxp->state);
1261*4882a593Smuzhiyun 		return;
1262*4882a593Smuzhiyun 	}
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	tgtp = phba->targetport->private;
1265*4882a593Smuzhiyun 	if (tgtp)
1266*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	/* Free the nvmebuf since a new buffer already replaced it */
1269*4882a593Smuzhiyun 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1270*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, iflag);
1271*4882a593Smuzhiyun 	ctxp->rqb_buffer = NULL;
1272*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun /**
1276*4882a593Smuzhiyun  * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
1277*4882a593Smuzhiyun  * @phba: Pointer to HBA context object
1278*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
1279*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
1280*4882a593Smuzhiyun  *
1281*4882a593Smuzhiyun  * This function is the completion handler for NVME LS requests.
1282*4882a593Smuzhiyun  * The function updates any states and statistics, then calls the
1283*4882a593Smuzhiyun  * generic completion handler to finish completion of the request.
1284*4882a593Smuzhiyun  **/
1285*4882a593Smuzhiyun static void
1286*4882a593Smuzhiyun lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1287*4882a593Smuzhiyun 		       struct lpfc_wcqe_complete *wcqe)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun 	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun /**
1293*4882a593Smuzhiyun  * lpfc_nvmet_ls_req - Issue a Link Service request
1294*4882a593Smuzhiyun  * @targetport: pointer to target instance registered with nvmet transport.
1295*4882a593Smuzhiyun  * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1296*4882a593Smuzhiyun  *              Driver sets this value to the ndlp pointer.
1297*4882a593Smuzhiyun  * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
1298*4882a593Smuzhiyun  *
1299*4882a593Smuzhiyun  * Driver registers this routine to handle any link service request
1300*4882a593Smuzhiyun  * from the nvme_fc transport to a remote nvme-aware port.
1301*4882a593Smuzhiyun  *
1302*4882a593Smuzhiyun  * Return value :
1303*4882a593Smuzhiyun  *   0 - Success
1304*4882a593Smuzhiyun  *   non-zero: various error codes, in form of -Exxx
1305*4882a593Smuzhiyun  **/
1306*4882a593Smuzhiyun static int
1307*4882a593Smuzhiyun lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1308*4882a593Smuzhiyun 		  void *hosthandle,
1309*4882a593Smuzhiyun 		  struct nvmefc_ls_req *pnvme_lsreq)
1310*4882a593Smuzhiyun {
1311*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1312*4882a593Smuzhiyun 	struct lpfc_hba *phba;
1313*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
1314*4882a593Smuzhiyun 	int ret;
1315*4882a593Smuzhiyun 	u32 hstate;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (!lpfc_nvmet)
1318*4882a593Smuzhiyun 		return -EINVAL;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	phba = lpfc_nvmet->phba;
1321*4882a593Smuzhiyun 	if (phba->pport->load_flag & FC_UNLOADING)
1322*4882a593Smuzhiyun 		return -EINVAL;
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	hstate = atomic_read(&lpfc_nvmet->state);
1325*4882a593Smuzhiyun 	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1326*4882a593Smuzhiyun 		return -EACCES;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	ndlp = (struct lpfc_nodelist *)hosthandle;
1329*4882a593Smuzhiyun 
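	/* Hand the LS off to the common __lpfc_nvme_ls_req() path;
	 * completion funnels through lpfc_nvmet_ls_req_cmp() above.
	 */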
1330*4882a593Smuzhiyun 	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1331*4882a593Smuzhiyun 				 lpfc_nvmet_ls_req_cmp);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	return ret;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun /**
1337*4882a593Smuzhiyun  * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
1338*4882a593Smuzhiyun  * @targetport: Transport targetport that the LS was issued from.
1339*4882a593Smuzhiyun  * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1340*4882a593Smuzhiyun  *              Driver sets this value to the ndlp pointer.
1341*4882a593Smuzhiyun  * @pnvme_lsreq: the transport nvme_ls_req structure for the LS to be aborted
1342*4882a593Smuzhiyun  *
1343*4882a593Smuzhiyun  * Driver registers this routine to abort an NVME LS request that is
1344*4882a593Smuzhiyun  * in progress (from the transport's perspective).
1345*4882a593Smuzhiyun  **/
1346*4882a593Smuzhiyun static void
1347*4882a593Smuzhiyun lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1348*4882a593Smuzhiyun 		    void *hosthandle,
1349*4882a593Smuzhiyun 		    struct nvmefc_ls_req *pnvme_lsreq)
1350*4882a593Smuzhiyun {
1351*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1352*4882a593Smuzhiyun 	struct lpfc_hba *phba;
1353*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
1354*4882a593Smuzhiyun 	int ret;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	phba = lpfc_nvmet->phba;
1357*4882a593Smuzhiyun 	if (phba->pport->load_flag & FC_UNLOADING)
1358*4882a593Smuzhiyun 		return;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	ndlp = (struct lpfc_nodelist *)hosthandle;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1363*4882a593Smuzhiyun 	if (!ret)
1364*4882a593Smuzhiyun 		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1365*4882a593Smuzhiyun }
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun static void
1368*4882a593Smuzhiyun lpfc_nvmet_host_release(void *hosthandle)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp = hosthandle;
1371*4882a593Smuzhiyun 	struct lpfc_hba *phba = NULL;
1372*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	phba = ndlp->phba;
1375*4882a593Smuzhiyun 	if (!phba->targetport || !phba->targetport->private)
1376*4882a593Smuzhiyun 		return;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1379*4882a593Smuzhiyun 			"6202 NVMET XPT releasing hosthandle x%px\n",
1380*4882a593Smuzhiyun 			hosthandle);
1381*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382*4882a593Smuzhiyun 	atomic_set(&tgtp->state, 0);
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun static void
1386*4882a593Smuzhiyun lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
1389*4882a593Smuzhiyun 	struct lpfc_hba *phba;
1390*4882a593Smuzhiyun 	uint32_t rc;
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	tgtp = tgtport->private;
1393*4882a593Smuzhiyun 	phba = tgtp->phba;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	rc = lpfc_issue_els_rscn(phba->pport, 0);
1396*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1397*4882a593Smuzhiyun 			"6420 NVMET subsystem change: Notification %s\n",
1398*4882a593Smuzhiyun 			(rc) ? "Failed" : "Sent");
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun 
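/* Entry points this driver registers with the nvmet-fc transport layer */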
1401*4882a593Smuzhiyun static struct nvmet_fc_target_template lpfc_tgttemplate = {
1402*4882a593Smuzhiyun 	.targetport_delete = lpfc_nvmet_targetport_delete,
1403*4882a593Smuzhiyun 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1404*4882a593Smuzhiyun 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
1405*4882a593Smuzhiyun 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1406*4882a593Smuzhiyun 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1407*4882a593Smuzhiyun 	.defer_rcv	= lpfc_nvmet_defer_rcv,
1408*4882a593Smuzhiyun 	.discovery_event = lpfc_nvmet_discovery_event,
1409*4882a593Smuzhiyun 	.ls_req         = lpfc_nvmet_ls_req,
1410*4882a593Smuzhiyun 	.ls_abort       = lpfc_nvmet_ls_abort,
1411*4882a593Smuzhiyun 	.host_release   = lpfc_nvmet_host_release,
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	.max_hw_queues  = 1,
1414*4882a593Smuzhiyun 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1415*4882a593Smuzhiyun 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1416*4882a593Smuzhiyun 	.dma_boundary = 0xFFFFFFFF,
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	/* optional features */
1419*4882a593Smuzhiyun 	.target_features = 0,
1420*4882a593Smuzhiyun 	/* sizes of additional private data for data structures */
1421*4882a593Smuzhiyun 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1422*4882a593Smuzhiyun 	.lsrqst_priv_sz = 0,
1423*4882a593Smuzhiyun };
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun static void
1426*4882a593Smuzhiyun __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1427*4882a593Smuzhiyun 		struct lpfc_nvmet_ctx_info *infop)
1428*4882a593Smuzhiyun {
1429*4882a593Smuzhiyun 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1430*4882a593Smuzhiyun 	unsigned long flags;
1431*4882a593Smuzhiyun 
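	/* Walk this CPU/MRQ context list, returning each context's SGL to
	 * the driver's nvmet SGL pool and freeing its iocbq and context
	 * memory.
	 */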
1432*4882a593Smuzhiyun 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1433*4882a593Smuzhiyun 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1434*4882a593Smuzhiyun 				&infop->nvmet_ctx_list, list) {
1435*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1436*4882a593Smuzhiyun 		list_del_init(&ctx_buf->list);
1437*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1440*4882a593Smuzhiyun 		ctx_buf->sglq->state = SGL_FREED;
1441*4882a593Smuzhiyun 		ctx_buf->sglq->ndlp = NULL;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1444*4882a593Smuzhiyun 		list_add_tail(&ctx_buf->sglq->list,
1445*4882a593Smuzhiyun 				&phba->sli4_hba.lpfc_nvmet_sgl_list);
1446*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1449*4882a593Smuzhiyun 		kfree(ctx_buf->context);
1450*4882a593Smuzhiyun 	}
1451*4882a593Smuzhiyun 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1452*4882a593Smuzhiyun }
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun static void
1455*4882a593Smuzhiyun lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *infop;
1458*4882a593Smuzhiyun 	int i, j;
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 	/* The first context list, MRQ 0 CPU 0 */
1461*4882a593Smuzhiyun 	infop = phba->sli4_hba.nvmet_ctx_info;
1462*4882a593Smuzhiyun 	if (!infop)
1463*4882a593Smuzhiyun 		return;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	/* Cycle through the entire CPU context list for every MRQ */
1466*4882a593Smuzhiyun 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1467*4882a593Smuzhiyun 		for_each_present_cpu(j) {
1468*4882a593Smuzhiyun 			infop = lpfc_get_ctx_list(phba, j, i);
1469*4882a593Smuzhiyun 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
1470*4882a593Smuzhiyun 		}
1471*4882a593Smuzhiyun 	}
1472*4882a593Smuzhiyun 	kfree(phba->sli4_hba.nvmet_ctx_info);
1473*4882a593Smuzhiyun 	phba->sli4_hba.nvmet_ctx_info = NULL;
1474*4882a593Smuzhiyun }
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun static int
1477*4882a593Smuzhiyun lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1480*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqe;
1481*4882a593Smuzhiyun 	union lpfc_wqe128 *wqe;
1482*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *last_infop;
1483*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *infop;
1484*4882a593Smuzhiyun 	int i, j, idx, cpu;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1487*4882a593Smuzhiyun 			"6403 Allocate NVMET resources for %d XRIs\n",
1488*4882a593Smuzhiyun 			phba->sli4_hba.nvmet_xri_cnt);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
1491*4882a593Smuzhiyun 		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1492*4882a593Smuzhiyun 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1493*4882a593Smuzhiyun 	if (!phba->sli4_hba.nvmet_ctx_info) {
1494*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1495*4882a593Smuzhiyun 				"6419 Failed allocate memory for "
1496*4882a593Smuzhiyun 				"nvmet context lists\n");
1497*4882a593Smuzhiyun 		return -ENOMEM;
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	/*
1501*4882a593Smuzhiyun 	 * Assuming X CPUs in the system, and Y MRQs, allocate some
1502*4882a593Smuzhiyun 	 * lpfc_nvmet_ctx_info structures as follows:
1503*4882a593Smuzhiyun 	 *
1504*4882a593Smuzhiyun 	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1505*4882a593Smuzhiyun 	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1506*4882a593Smuzhiyun 	 * ...
1507*4882a593Smuzhiyun 	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1508*4882a593Smuzhiyun 	 *
1509*4882a593Smuzhiyun 	 * Each line represents a MRQ "silo" containing an entry for
1510*4882a593Smuzhiyun 	 * every CPU.
1511*4882a593Smuzhiyun 	 *
1512*4882a593Smuzhiyun 	 * MRQ X is initially assumed to be associated with CPU X, thus
1513*4882a593Smuzhiyun 	 * contexts are initially distributed across all MRQs using
1514*4882a593Smuzhiyun 	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1515*4882a593Smuzhiyun 	 * freed, they are freed to the MRQ silo based on the CPU number
1516*4882a593Smuzhiyun 	 * of the IO completion. Thus a context that was allocated for MRQ A
1517*4882a593Smuzhiyun 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1518*4882a593Smuzhiyun 	 */
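	/* The nvmet_ctx_info array allocated above is flat
	 * (num_possible_cpu x cfg_nvmet_mrq entries); lpfc_get_ctx_list(),
	 * presumably defined in lpfc_sli4.h, indexes into it by CPU and MRQ
	 * to return the matching silo entry.
	 */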
1519*4882a593Smuzhiyun 	for_each_possible_cpu(i) {
1520*4882a593Smuzhiyun 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1521*4882a593Smuzhiyun 			infop = lpfc_get_ctx_list(phba, i, j);
1522*4882a593Smuzhiyun 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1523*4882a593Smuzhiyun 			spin_lock_init(&infop->nvmet_ctx_list_lock);
1524*4882a593Smuzhiyun 			infop->nvmet_ctx_list_cnt = 0;
1525*4882a593Smuzhiyun 		}
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	/*
1529*4882a593Smuzhiyun 	 * Setup the next CPU context info ptr for each MRQ.
1530*4882a593Smuzhiyun 	 * MRQ 0 will cycle thru CPUs 0 - X separately from
1531*4882a593Smuzhiyun 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1532*4882a593Smuzhiyun 	 */
1533*4882a593Smuzhiyun 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1534*4882a593Smuzhiyun 		last_infop = lpfc_get_ctx_list(phba,
1535*4882a593Smuzhiyun 					       cpumask_first(cpu_present_mask),
1536*4882a593Smuzhiyun 					       j);
1537*4882a593Smuzhiyun 		for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1538*4882a593Smuzhiyun 			infop = lpfc_get_ctx_list(phba, i, j);
1539*4882a593Smuzhiyun 			infop->nvmet_ctx_next_cpu = last_infop;
1540*4882a593Smuzhiyun 			last_infop = infop;
1541*4882a593Smuzhiyun 		}
1542*4882a593Smuzhiyun 	}
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	/* For all nvmet xris, allocate resources needed to process a
1545*4882a593Smuzhiyun 	 * received command on a per xri basis.
1546*4882a593Smuzhiyun 	 */
1547*4882a593Smuzhiyun 	idx = 0;
1548*4882a593Smuzhiyun 	cpu = cpumask_first(cpu_present_mask);
1549*4882a593Smuzhiyun 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1550*4882a593Smuzhiyun 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1551*4882a593Smuzhiyun 		if (!ctx_buf) {
1552*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1553*4882a593Smuzhiyun 					"6404 Ran out of memory for NVMET\n");
1554*4882a593Smuzhiyun 			return -ENOMEM;
1555*4882a593Smuzhiyun 		}
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1558*4882a593Smuzhiyun 					   GFP_KERNEL);
1559*4882a593Smuzhiyun 		if (!ctx_buf->context) {
1560*4882a593Smuzhiyun 			kfree(ctx_buf);
1561*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1562*4882a593Smuzhiyun 					"6405 Ran out of NVMET "
1563*4882a593Smuzhiyun 					"context memory\n");
1564*4882a593Smuzhiyun 			return -ENOMEM;
1565*4882a593Smuzhiyun 		}
1566*4882a593Smuzhiyun 		ctx_buf->context->ctxbuf = ctx_buf;
1567*4882a593Smuzhiyun 		ctx_buf->context->state = LPFC_NVME_STE_FREE;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1570*4882a593Smuzhiyun 		if (!ctx_buf->iocbq) {
1571*4882a593Smuzhiyun 			kfree(ctx_buf->context);
1572*4882a593Smuzhiyun 			kfree(ctx_buf);
1573*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1574*4882a593Smuzhiyun 					"6406 Ran out of NVMET iocb/WQEs\n");
1575*4882a593Smuzhiyun 			return -ENOMEM;
1576*4882a593Smuzhiyun 		}
1577*4882a593Smuzhiyun 		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1578*4882a593Smuzhiyun 		nvmewqe = ctx_buf->iocbq;
1579*4882a593Smuzhiyun 		wqe = &nvmewqe->wqe;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 		/* Initialize WQE */
1582*4882a593Smuzhiyun 		memset(wqe, 0, sizeof(union lpfc_wqe));
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		ctx_buf->iocbq->context1 = NULL;
1585*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1586*4882a593Smuzhiyun 		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1587*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1588*4882a593Smuzhiyun 		if (!ctx_buf->sglq) {
1589*4882a593Smuzhiyun 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1590*4882a593Smuzhiyun 			kfree(ctx_buf->context);
1591*4882a593Smuzhiyun 			kfree(ctx_buf);
1592*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1593*4882a593Smuzhiyun 					"6407 Ran out of NVMET XRIs\n");
1594*4882a593Smuzhiyun 			return -ENOMEM;
1595*4882a593Smuzhiyun 		}
1596*4882a593Smuzhiyun 		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 		/*
1599*4882a593Smuzhiyun 		 * Add ctx to MRQidx context list. Our initial assumption
1600*4882a593Smuzhiyun 		 * is MRQidx will be associated with CPUidx. This association
1601*4882a593Smuzhiyun 		 * can change on the fly.
1602*4882a593Smuzhiyun 		 */
1603*4882a593Smuzhiyun 		infop = lpfc_get_ctx_list(phba, cpu, idx);
1604*4882a593Smuzhiyun 		spin_lock(&infop->nvmet_ctx_list_lock);
1605*4882a593Smuzhiyun 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1606*4882a593Smuzhiyun 		infop->nvmet_ctx_list_cnt++;
1607*4882a593Smuzhiyun 		spin_unlock(&infop->nvmet_ctx_list_lock);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 		/* Spread ctx structures evenly across all MRQs */
1610*4882a593Smuzhiyun 		idx++;
1611*4882a593Smuzhiyun 		if (idx >= phba->cfg_nvmet_mrq) {
1612*4882a593Smuzhiyun 			idx = 0;
1613*4882a593Smuzhiyun 			cpu = cpumask_first(cpu_present_mask);
1614*4882a593Smuzhiyun 			continue;
1615*4882a593Smuzhiyun 		}
1616*4882a593Smuzhiyun 		cpu = cpumask_next(cpu, cpu_present_mask);
1617*4882a593Smuzhiyun 		if (cpu == nr_cpu_ids)
1618*4882a593Smuzhiyun 			cpu = cpumask_first(cpu_present_mask);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	}
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	for_each_present_cpu(i) {
1623*4882a593Smuzhiyun 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1624*4882a593Smuzhiyun 			infop = lpfc_get_ctx_list(phba, i, j);
1625*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1626*4882a593Smuzhiyun 					"6408 TOTAL NVMET ctx for CPU %d "
1627*4882a593Smuzhiyun 					"MRQ %d: cnt %d nextcpu x%px\n",
1628*4882a593Smuzhiyun 					i, j, infop->nvmet_ctx_list_cnt,
1629*4882a593Smuzhiyun 					infop->nvmet_ctx_next_cpu);
1630*4882a593Smuzhiyun 		}
1631*4882a593Smuzhiyun 	}
1632*4882a593Smuzhiyun 	return 0;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun int
1636*4882a593Smuzhiyun lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	struct lpfc_vport  *vport = phba->pport;
1639*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
1640*4882a593Smuzhiyun 	struct nvmet_fc_port_info pinfo;
1641*4882a593Smuzhiyun 	int error;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (phba->targetport)
1644*4882a593Smuzhiyun 		return 0;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	error = lpfc_nvmet_setup_io_context(phba);
1647*4882a593Smuzhiyun 	if (error)
1648*4882a593Smuzhiyun 		return error;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1651*4882a593Smuzhiyun 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1652*4882a593Smuzhiyun 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1653*4882a593Smuzhiyun 	pinfo.port_id = vport->fc_myDID;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	/* We need to tell the transport layer + 1 because it takes page
1656*4882a593Smuzhiyun 	 * alignment into account. When space for the SGL is allocated we
1657*4882a593Smuzhiyun 	 * allocate + 3: one for the cmd, one for the rsp and one for this alignment
1658*4882a593Smuzhiyun 	 */
1659*4882a593Smuzhiyun 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1660*4882a593Smuzhiyun 	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1661*4882a593Smuzhiyun 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1662*4882a593Smuzhiyun 
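	/* Register this port as an NVME target with the nvmet-fc transport.
	 * The transport allocates target_priv_sz bytes of private data (our
	 * struct lpfc_nvmet_tgtport) along with the targetport.
	 */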
1663*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1664*4882a593Smuzhiyun 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1665*4882a593Smuzhiyun 					     &phba->pcidev->dev,
1666*4882a593Smuzhiyun 					     &phba->targetport);
1667*4882a593Smuzhiyun #else
1668*4882a593Smuzhiyun 	error = -ENOENT;
1669*4882a593Smuzhiyun #endif
1670*4882a593Smuzhiyun 	if (error) {
1671*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1672*4882a593Smuzhiyun 				"6025 Cannot register NVME targetport x%x: "
1673*4882a593Smuzhiyun 				"portnm %llx nodenm %llx segs %d qs %d\n",
1674*4882a593Smuzhiyun 				error,
1675*4882a593Smuzhiyun 				pinfo.port_name, pinfo.node_name,
1676*4882a593Smuzhiyun 				lpfc_tgttemplate.max_sgl_segments,
1677*4882a593Smuzhiyun 				lpfc_tgttemplate.max_hw_queues);
1678*4882a593Smuzhiyun 		phba->targetport = NULL;
1679*4882a593Smuzhiyun 		phba->nvmet_support = 0;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 		lpfc_nvmet_cleanup_io_context(phba);
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	} else {
1684*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)
1685*4882a593Smuzhiyun 			phba->targetport->private;
1686*4882a593Smuzhiyun 		tgtp->phba = phba;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1689*4882a593Smuzhiyun 				"6026 Registered NVME "
1690*4882a593Smuzhiyun 				"targetport: x%px, private x%px "
1691*4882a593Smuzhiyun 				"portnm %llx nodenm %llx segs %d qs %d\n",
1692*4882a593Smuzhiyun 				phba->targetport, tgtp,
1693*4882a593Smuzhiyun 				pinfo.port_name, pinfo.node_name,
1694*4882a593Smuzhiyun 				lpfc_tgttemplate.max_sgl_segments,
1695*4882a593Smuzhiyun 				lpfc_tgttemplate.max_hw_queues);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_ls_req_in, 0);
1698*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_ls_req_out, 0);
1699*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
1700*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_abort, 0);
1701*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1702*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_rsp, 0);
1703*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_drop, 0);
1704*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1705*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1706*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1707*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1708*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1709*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1710*4882a593Smuzhiyun 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1711*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_drop, 0);
1712*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1713*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_read, 0);
1714*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_write, 0);
1715*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
1716*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_release, 0);
1717*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1718*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1719*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1720*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1721*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1722*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1723*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_abort, 0);
1724*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1725*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_abort_unsol, 0);
1726*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_abort_sol, 0);
1727*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_abort_rsp, 0);
1728*4882a593Smuzhiyun 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1729*4882a593Smuzhiyun 		atomic_set(&tgtp->defer_ctx, 0);
1730*4882a593Smuzhiyun 		atomic_set(&tgtp->defer_fod, 0);
1731*4882a593Smuzhiyun 		atomic_set(&tgtp->defer_wqfull, 0);
1732*4882a593Smuzhiyun 	}
1733*4882a593Smuzhiyun 	return error;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun int
1737*4882a593Smuzhiyun lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun 	struct lpfc_vport  *vport = phba->pport;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	if (!phba->targetport)
1742*4882a593Smuzhiyun 		return 0;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1745*4882a593Smuzhiyun 			 "6007 Update NVMET port x%px did x%x\n",
1746*4882a593Smuzhiyun 			 phba->targetport, vport->fc_myDID);
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	phba->targetport->port_id = vport->fc_myDID;
1749*4882a593Smuzhiyun 	return 0;
1750*4882a593Smuzhiyun }
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun /**
1753*4882a593Smuzhiyun  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1754*4882a593Smuzhiyun  * @phba: pointer to lpfc hba data structure.
1755*4882a593Smuzhiyun  * @axri: pointer to the nvmet xri abort wcqe structure.
1756*4882a593Smuzhiyun  *
1757*4882a593Smuzhiyun  * This routine is invoked by the worker thread to process a SLI4 fast-path
1758*4882a593Smuzhiyun  * NVMET aborted xri.
1759*4882a593Smuzhiyun  **/
1760*4882a593Smuzhiyun void
1761*4882a593Smuzhiyun lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1762*4882a593Smuzhiyun 			    struct sli4_wcqe_xri_aborted *axri)
1763*4882a593Smuzhiyun {
1764*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1765*4882a593Smuzhiyun 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1766*4882a593Smuzhiyun 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1767*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1768*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
1769*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *req = NULL;
1770*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
1771*4882a593Smuzhiyun 	unsigned long iflag = 0;
1772*4882a593Smuzhiyun 	int rrq_empty = 0;
1773*4882a593Smuzhiyun 	bool released = false;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1776*4882a593Smuzhiyun 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1779*4882a593Smuzhiyun 		return;
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	if (phba->targetport) {
1782*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1783*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1784*4882a593Smuzhiyun 	}
1785*4882a593Smuzhiyun 
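	/* Search the driver's list of aborted nvmet exchanges for the XRI
	 * reported by the XRI_ABORTED CQE.
	 */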
1786*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, iflag);
1787*4882a593Smuzhiyun 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1788*4882a593Smuzhiyun 	list_for_each_entry_safe(ctxp, next_ctxp,
1789*4882a593Smuzhiyun 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1790*4882a593Smuzhiyun 				 list) {
1791*4882a593Smuzhiyun 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1792*4882a593Smuzhiyun 			continue;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 		spin_lock(&ctxp->ctxlock);
1795*4882a593Smuzhiyun 		/* Check if we already received a free context call
1796*4882a593Smuzhiyun 		 * and we have completed processing an abort situation.
1797*4882a593Smuzhiyun 		 */
1798*4882a593Smuzhiyun 		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1799*4882a593Smuzhiyun 		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1800*4882a593Smuzhiyun 			list_del_init(&ctxp->list);
1801*4882a593Smuzhiyun 			released = true;
1802*4882a593Smuzhiyun 		}
1803*4882a593Smuzhiyun 		ctxp->flag &= ~LPFC_NVME_XBUSY;
1804*4882a593Smuzhiyun 		spin_unlock(&ctxp->ctxlock);
1805*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 		rrq_empty = list_empty(&phba->active_rrq_list);
1808*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1809*4882a593Smuzhiyun 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1810*4882a593Smuzhiyun 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1811*4882a593Smuzhiyun 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1812*4882a593Smuzhiyun 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1813*4882a593Smuzhiyun 			lpfc_set_rrq_active(phba, ndlp,
1814*4882a593Smuzhiyun 				ctxp->ctxbuf->sglq->sli4_lxritag,
1815*4882a593Smuzhiyun 				rxid, 1);
1816*4882a593Smuzhiyun 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1817*4882a593Smuzhiyun 		}
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1820*4882a593Smuzhiyun 				"6318 XB aborted oxid x%x flg x%x (%x)\n",
1821*4882a593Smuzhiyun 				ctxp->oxid, ctxp->flag, released);
1822*4882a593Smuzhiyun 		if (released)
1823*4882a593Smuzhiyun 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 		if (rrq_empty)
1826*4882a593Smuzhiyun 			lpfc_worker_wake_up(phba);
1827*4882a593Smuzhiyun 		return;
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1830*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1833*4882a593Smuzhiyun 	if (ctxp) {
1834*4882a593Smuzhiyun 		/*
1835*4882a593Smuzhiyun 		 *  Abort already done by FW, so BA_ACC sent.
1836*4882a593Smuzhiyun 		 *  However, the transport may be unaware.
1837*4882a593Smuzhiyun 		 */
1838*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1839*4882a593Smuzhiyun 				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1840*4882a593Smuzhiyun 				"flag x%x oxid x%x rxid x%x\n",
1841*4882a593Smuzhiyun 				xri, ctxp->state, ctxp->flag, ctxp->oxid,
1842*4882a593Smuzhiyun 				rxid);
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1845*4882a593Smuzhiyun 		ctxp->flag |= LPFC_NVME_ABTS_RCV;
1846*4882a593Smuzhiyun 		ctxp->state = LPFC_NVME_STE_ABORT;
1847*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 		lpfc_nvmeio_data(phba,
1850*4882a593Smuzhiyun 				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1851*4882a593Smuzhiyun 				 xri, raw_smp_processor_id(), 0);
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 		req = &ctxp->hdlrctx.fcp_req;
1854*4882a593Smuzhiyun 		if (req)
1855*4882a593Smuzhiyun 			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1856*4882a593Smuzhiyun 	}
1857*4882a593Smuzhiyun #endif
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun int
1861*4882a593Smuzhiyun lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1862*4882a593Smuzhiyun 			   struct fc_frame_header *fc_hdr)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1865*4882a593Smuzhiyun 	struct lpfc_hba *phba = vport->phba;
1866*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1867*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *rsp;
1868*4882a593Smuzhiyun 	uint32_t sid;
1869*4882a593Smuzhiyun 	uint16_t oxid, xri;
1870*4882a593Smuzhiyun 	unsigned long iflag = 0;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1873*4882a593Smuzhiyun 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1874*4882a593Smuzhiyun 
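	/* First check the list of exchanges already undergoing abort
	 * processing for a match on oxid and s_id.
	 */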
1875*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, iflag);
1876*4882a593Smuzhiyun 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1877*4882a593Smuzhiyun 	list_for_each_entry_safe(ctxp, next_ctxp,
1878*4882a593Smuzhiyun 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1879*4882a593Smuzhiyun 				 list) {
1880*4882a593Smuzhiyun 		if (ctxp->oxid != oxid || ctxp->sid != sid)
1881*4882a593Smuzhiyun 			continue;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1886*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1889*4882a593Smuzhiyun 		ctxp->flag |= LPFC_NVME_ABTS_RCV;
1890*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 		lpfc_nvmeio_data(phba,
1893*4882a593Smuzhiyun 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1894*4882a593Smuzhiyun 			xri, raw_smp_processor_id(), 0);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1897*4882a593Smuzhiyun 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 		rsp = &ctxp->hdlrctx.fcp_req;
1900*4882a593Smuzhiyun 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 		/* Respond with BA_ACC accordingly */
1903*4882a593Smuzhiyun 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1904*4882a593Smuzhiyun 		return 0;
1905*4882a593Smuzhiyun 	}
1906*4882a593Smuzhiyun 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1907*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	/* check the wait list */
1910*4882a593Smuzhiyun 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
1911*4882a593Smuzhiyun 		struct rqb_dmabuf *nvmebuf;
1912*4882a593Smuzhiyun 		struct fc_frame_header *fc_hdr_tmp;
1913*4882a593Smuzhiyun 		u32 sid_tmp;
1914*4882a593Smuzhiyun 		u16 oxid_tmp;
1915*4882a593Smuzhiyun 		bool found = false;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 		/* match by oxid and s_id */
1920*4882a593Smuzhiyun 		list_for_each_entry(nvmebuf,
1921*4882a593Smuzhiyun 				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1922*4882a593Smuzhiyun 				    hbuf.list) {
1923*4882a593Smuzhiyun 			fc_hdr_tmp = (struct fc_frame_header *)
1924*4882a593Smuzhiyun 					(nvmebuf->hbuf.virt);
1925*4882a593Smuzhiyun 			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1926*4882a593Smuzhiyun 			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1927*4882a593Smuzhiyun 			if (oxid_tmp != oxid || sid_tmp != sid)
1928*4882a593Smuzhiyun 				continue;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1931*4882a593Smuzhiyun 					"6321 NVMET Rcv ABTS oxid x%x from x%x "
1932*4882a593Smuzhiyun 					"is waiting for a ctxp\n",
1933*4882a593Smuzhiyun 					oxid, sid);
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 			list_del_init(&nvmebuf->hbuf.list);
1936*4882a593Smuzhiyun 			phba->sli4_hba.nvmet_io_wait_cnt--;
1937*4882a593Smuzhiyun 			found = true;
1938*4882a593Smuzhiyun 			break;
1939*4882a593Smuzhiyun 		}
1940*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1941*4882a593Smuzhiyun 				       iflag);
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 		/* free buffer since already posted a new DMA buffer to RQ */
1944*4882a593Smuzhiyun 		if (found) {
1945*4882a593Smuzhiyun 			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1946*4882a593Smuzhiyun 			/* Respond with BA_ACC accordingly */
1947*4882a593Smuzhiyun 			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1948*4882a593Smuzhiyun 			return 0;
1949*4882a593Smuzhiyun 		}
1950*4882a593Smuzhiyun 	}
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	/* check active list */
1953*4882a593Smuzhiyun 	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954*4882a593Smuzhiyun 	if (ctxp) {
1955*4882a593Smuzhiyun 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958*4882a593Smuzhiyun 		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 		lpfc_nvmeio_data(phba,
1962*4882a593Smuzhiyun 				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1963*4882a593Smuzhiyun 				 xri, raw_smp_processor_id(), 0);
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1966*4882a593Smuzhiyun 				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1967*4882a593Smuzhiyun 				"flag x%x state x%x\n",
1968*4882a593Smuzhiyun 				ctxp->oxid, xri, ctxp->flag, ctxp->state);
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1971*4882a593Smuzhiyun 			/* Notify the transport */
1972*4882a593Smuzhiyun 			nvmet_fc_rcv_fcp_abort(phba->targetport,
1973*4882a593Smuzhiyun 					       &ctxp->hdlrctx.fcp_req);
1974*4882a593Smuzhiyun 		} else {
1975*4882a593Smuzhiyun 			cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976*4882a593Smuzhiyun 			spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977*4882a593Smuzhiyun 			lpfc_nvmet_defer_release(phba, ctxp);
1978*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1979*4882a593Smuzhiyun 		}
1980*4882a593Smuzhiyun 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981*4882a593Smuzhiyun 					       ctxp->oxid);
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1984*4882a593Smuzhiyun 		return 0;
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1988*4882a593Smuzhiyun 			 oxid, raw_smp_processor_id(), 1);
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1991*4882a593Smuzhiyun 			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	/* Respond with BA_RJT accordingly */
1994*4882a593Smuzhiyun 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1995*4882a593Smuzhiyun #endif
1996*4882a593Smuzhiyun 	return 0;
1997*4882a593Smuzhiyun }
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun static void
2000*4882a593Smuzhiyun lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2001*4882a593Smuzhiyun 			struct lpfc_async_xchg_ctx *ctxp)
2002*4882a593Smuzhiyun {
2003*4882a593Smuzhiyun 	struct lpfc_sli_ring *pring;
2004*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqeq;
2005*4882a593Smuzhiyun 	struct lpfc_iocbq *next_nvmewqeq;
2006*4882a593Smuzhiyun 	unsigned long iflags;
2007*4882a593Smuzhiyun 	struct lpfc_wcqe_complete wcqe;
2008*4882a593Smuzhiyun 	struct lpfc_wcqe_complete *wcqep;
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 	pring = wq->pring;
2011*4882a593Smuzhiyun 	wcqep = &wcqe;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	/* Fake an ABORT error code back to cmpl routine */
2014*4882a593Smuzhiyun 	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2015*4882a593Smuzhiyun 	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2016*4882a593Smuzhiyun 	wcqep->parameter = IOERR_ABORT_REQUESTED;
2017*4882a593Smuzhiyun 
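	/* Walk the wqfull_list, completing either the single WQE that
	 * matches ctxp or, when ctxp is NULL, every deferred WQE with the
	 * faked abort status above.
	 */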
2018*4882a593Smuzhiyun 	spin_lock_irqsave(&pring->ring_lock, iflags);
2019*4882a593Smuzhiyun 	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2020*4882a593Smuzhiyun 				 &wq->wqfull_list, list) {
2021*4882a593Smuzhiyun 		if (ctxp) {
2022*4882a593Smuzhiyun 			/* Checking for a specific IO to flush */
2023*4882a593Smuzhiyun 			if (nvmewqeq->context2 == ctxp) {
2024*4882a593Smuzhiyun 				list_del(&nvmewqeq->list);
2025*4882a593Smuzhiyun 				spin_unlock_irqrestore(&pring->ring_lock,
2026*4882a593Smuzhiyun 						       iflags);
2027*4882a593Smuzhiyun 				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2028*4882a593Smuzhiyun 							  wcqep);
2029*4882a593Smuzhiyun 				return;
2030*4882a593Smuzhiyun 			}
2031*4882a593Smuzhiyun 			continue;
2032*4882a593Smuzhiyun 		} else {
2033*4882a593Smuzhiyun 			/* Flush all IOs */
2034*4882a593Smuzhiyun 			list_del(&nvmewqeq->list);
2035*4882a593Smuzhiyun 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
2036*4882a593Smuzhiyun 			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
2037*4882a593Smuzhiyun 			spin_lock_irqsave(&pring->ring_lock, iflags);
2038*4882a593Smuzhiyun 		}
2039*4882a593Smuzhiyun 	}
2040*4882a593Smuzhiyun 	if (!ctxp)
2041*4882a593Smuzhiyun 		wq->q_flag &= ~HBA_NVMET_WQFULL;
2042*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
2043*4882a593Smuzhiyun }
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun void
2046*4882a593Smuzhiyun lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2047*4882a593Smuzhiyun 			  struct lpfc_queue *wq)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050*4882a593Smuzhiyun 	struct lpfc_sli_ring *pring;
2051*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqeq;
2052*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
2053*4882a593Smuzhiyun 	unsigned long iflags;
2054*4882a593Smuzhiyun 	int rc;
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	/*
2057*4882a593Smuzhiyun 	 * Some WQE slots are available, so try to re-issue anything
2058*4882a593Smuzhiyun 	 * on the WQ wqfull_list.
2059*4882a593Smuzhiyun 	 */
2060*4882a593Smuzhiyun 	pring = wq->pring;
2061*4882a593Smuzhiyun 	spin_lock_irqsave(&pring->ring_lock, iflags);
2062*4882a593Smuzhiyun 	while (!list_empty(&wq->wqfull_list)) {
2063*4882a593Smuzhiyun 		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2064*4882a593Smuzhiyun 				 list);
2065*4882a593Smuzhiyun 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
2066*4882a593Smuzhiyun 		ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067*4882a593Smuzhiyun 		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2068*4882a593Smuzhiyun 		spin_lock_irqsave(&pring->ring_lock, iflags);
2069*4882a593Smuzhiyun 		if (rc == -EBUSY) {
2070*4882a593Smuzhiyun 			/* WQ was full again, so put it back on the list */
2071*4882a593Smuzhiyun 			list_add(&nvmewqeq->list, &wq->wqfull_list);
2072*4882a593Smuzhiyun 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
2073*4882a593Smuzhiyun 			return;
2074*4882a593Smuzhiyun 		}
2075*4882a593Smuzhiyun 		if (rc == WQE_SUCCESS) {
2076*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2077*4882a593Smuzhiyun 			if (ctxp->ts_cmd_nvme) {
2078*4882a593Smuzhiyun 				if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079*4882a593Smuzhiyun 					ctxp->ts_status_wqput = ktime_get_ns();
2080*4882a593Smuzhiyun 				else
2081*4882a593Smuzhiyun 					ctxp->ts_data_wqput = ktime_get_ns();
2082*4882a593Smuzhiyun 			}
2083*4882a593Smuzhiyun #endif
2084*4882a593Smuzhiyun 		} else {
2085*4882a593Smuzhiyun 			WARN_ON(rc);
2086*4882a593Smuzhiyun 		}
2087*4882a593Smuzhiyun 	}
2088*4882a593Smuzhiyun 	wq->q_flag &= ~HBA_NVMET_WQFULL;
2089*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun #endif
2092*4882a593Smuzhiyun }
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun void
2095*4882a593Smuzhiyun lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2096*4882a593Smuzhiyun {
2097*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2098*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
2099*4882a593Smuzhiyun 	struct lpfc_queue *wq;
2100*4882a593Smuzhiyun 	uint32_t qidx;
2101*4882a593Smuzhiyun 	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 	if (phba->nvmet_support == 0)
2104*4882a593Smuzhiyun 		return;
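	/* Flush any WQEs still parked on the per-hardware-queue wqfull
	 * lists, then unregister the targetport and wait (bounded by
	 * LPFC_NVMET_WAIT_TMO) for the transport's delete callback.
	 */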
2105*4882a593Smuzhiyun 	if (phba->targetport) {
2106*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2107*4882a593Smuzhiyun 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2108*4882a593Smuzhiyun 			wq = phba->sli4_hba.hdwq[qidx].io_wq;
2109*4882a593Smuzhiyun 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2110*4882a593Smuzhiyun 		}
2111*4882a593Smuzhiyun 		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2112*4882a593Smuzhiyun 		nvmet_fc_unregister_targetport(phba->targetport);
2113*4882a593Smuzhiyun 		if (!wait_for_completion_timeout(&tport_unreg_cmp,
2114*4882a593Smuzhiyun 					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2115*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2116*4882a593Smuzhiyun 					"6179 Unreg targetport x%px timeout "
2117*4882a593Smuzhiyun 					"reached.\n", phba->targetport);
2118*4882a593Smuzhiyun 		lpfc_nvmet_cleanup_io_context(phba);
2119*4882a593Smuzhiyun 	}
2120*4882a593Smuzhiyun 	phba->targetport = NULL;
2121*4882a593Smuzhiyun #endif
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun /**
2125*4882a593Smuzhiyun  * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2126*4882a593Smuzhiyun  * @phba: pointer to lpfc hba data structure.
2127*4882a593Smuzhiyun  * @axchg: pointer to exchange context for the NVME LS request
2128*4882a593Smuzhiyun  *
2129*4882a593Smuzhiyun  * This routine is used for processing an asynchronously received NVME LS
2130*4882a593Smuzhiyun  * request. Any remaining validation is done and the LS is then forwarded
2131*4882a593Smuzhiyun  * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2132*4882a593Smuzhiyun  *
2133*4882a593Smuzhiyun  * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2134*4882a593Smuzhiyun  * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2135*4882a593Smuzhiyun  * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
2136*4882a593Smuzhiyun  *
2137*4882a593Smuzhiyun  * Returns 0 if LS was handled and delivered to the transport
2138*4882a593Smuzhiyun  * Returns 1 if LS failed to be handled and should be dropped
2139*4882a593Smuzhiyun  */
2140*4882a593Smuzhiyun int
2141*4882a593Smuzhiyun lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2142*4882a593Smuzhiyun 			struct lpfc_async_xchg_ctx *axchg)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2145*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2146*4882a593Smuzhiyun 	uint32_t *payload = axchg->payload;
2147*4882a593Smuzhiyun 	int rc;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	atomic_inc(&tgtp->rcv_ls_req_in);
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	/*
2152*4882a593Smuzhiyun 	 * Driver passes the ndlp as the hosthandle argument allowing
2153*4882a593Smuzhiyun 	 * the transport to generate LS requests for any associations
2154*4882a593Smuzhiyun 	 * that are created.
2155*4882a593Smuzhiyun 	 */
2156*4882a593Smuzhiyun 	rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2157*4882a593Smuzhiyun 				 axchg->payload, axchg->size);
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2160*4882a593Smuzhiyun 			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2161*4882a593Smuzhiyun 			"%08x %08x %08x\n", axchg->size, rc,
2162*4882a593Smuzhiyun 			*payload, *(payload+1), *(payload+2),
2163*4882a593Smuzhiyun 			*(payload+3), *(payload+4), *(payload+5));
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	if (!rc) {
2166*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_ls_req_out);
2167*4882a593Smuzhiyun 		return 0;
2168*4882a593Smuzhiyun 	}
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	atomic_inc(&tgtp->rcv_ls_req_drop);
2171*4882a593Smuzhiyun #endif
2172*4882a593Smuzhiyun 	return 1;
2173*4882a593Smuzhiyun }
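/*
 * Editorial note, not driver code: a minimal sketch of how a caller might
 * act on the 0/1 return convention of lpfc_nvmet_handle_lsreq() documented
 * above.  example_dispatch_ls() is a hypothetical helper.
 */
#if 0
static void example_dispatch_ls(struct lpfc_hba *phba,
				struct lpfc_async_xchg_ctx *axchg)
{
	int rc = lpfc_nvmet_handle_lsreq(phba, axchg);

	if (rc == 0)
		return;	/* delivered; axchg is freed by the rsp cmpl path */

	/* rc == 1: not handled - the caller must drop/clean up the exchange */
}
#endif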
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun static void
2176*4882a593Smuzhiyun lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2177*4882a593Smuzhiyun {
2178*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2179*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2180*4882a593Smuzhiyun 	struct lpfc_hba *phba = ctxp->phba;
2181*4882a593Smuzhiyun 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2182*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
2183*4882a593Smuzhiyun 	uint32_t *payload, qno;
2184*4882a593Smuzhiyun 	uint32_t rc;
2185*4882a593Smuzhiyun 	unsigned long iflags;
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	if (!nvmebuf) {
2188*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2189*4882a593Smuzhiyun 			"6159 process_rcv_fcp_req, nvmebuf is NULL, "
2190*4882a593Smuzhiyun 			"oxid: x%x flg: x%x state: x%x\n",
2191*4882a593Smuzhiyun 			ctxp->oxid, ctxp->flag, ctxp->state);
2192*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2193*4882a593Smuzhiyun 		lpfc_nvmet_defer_release(phba, ctxp);
2194*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2195*4882a593Smuzhiyun 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2196*4882a593Smuzhiyun 						 ctxp->oxid);
2197*4882a593Smuzhiyun 		return;
2198*4882a593Smuzhiyun 	}
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2201*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202*4882a593Smuzhiyun 				"6324 IO oxid x%x aborted\n",
2203*4882a593Smuzhiyun 				ctxp->oxid);
2204*4882a593Smuzhiyun 		return;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
2208*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2209*4882a593Smuzhiyun 	ctxp->flag |= LPFC_NVME_TNOTIFY;
2210*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2211*4882a593Smuzhiyun 	if (ctxp->ts_isr_cmd)
2212*4882a593Smuzhiyun 		ctxp->ts_cmd_nvme = ktime_get_ns();
2213*4882a593Smuzhiyun #endif
2214*4882a593Smuzhiyun 	/*
2215*4882a593Smuzhiyun 	 * The calling sequence should be:
2216*4882a593Smuzhiyun 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2217*4882a593Smuzhiyun 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2218*4882a593Smuzhiyun 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info on
2219*4882a593Smuzhiyun 	 * the NVME command / FC header has been stored.
2220*4882a593Smuzhiyun 	 * A buffer has already been reposted for this IO, so just free
2221*4882a593Smuzhiyun 	 * the nvmebuf.
2222*4882a593Smuzhiyun 	 */
2223*4882a593Smuzhiyun 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2224*4882a593Smuzhiyun 				  payload, ctxp->size);
2225*4882a593Smuzhiyun 	/* Process FCP command */
2226*4882a593Smuzhiyun 	if (rc == 0) {
2227*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2228*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2229*4882a593Smuzhiyun 		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2230*4882a593Smuzhiyun 		    (nvmebuf != ctxp->rqb_buffer)) {
2231*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2232*4882a593Smuzhiyun 			return;
2233*4882a593Smuzhiyun 		}
2234*4882a593Smuzhiyun 		ctxp->rqb_buffer = NULL;
2235*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2236*4882a593Smuzhiyun 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2237*4882a593Smuzhiyun 		return;
2238*4882a593Smuzhiyun 	}
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun 	/* Processing of FCP command is deferred */
2241*4882a593Smuzhiyun 	if (rc == -EOVERFLOW) {
2242*4882a593Smuzhiyun 		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2243*4882a593Smuzhiyun 				 "from %06x\n",
2244*4882a593Smuzhiyun 				 ctxp->oxid, ctxp->size, ctxp->sid);
2245*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2246*4882a593Smuzhiyun 		atomic_inc(&tgtp->defer_fod);
2247*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2248*4882a593Smuzhiyun 		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2249*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2250*4882a593Smuzhiyun 			return;
2251*4882a593Smuzhiyun 		}
2252*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253*4882a593Smuzhiyun 		/*
2254*4882a593Smuzhiyun 		 * Post a replacement DMA buffer to RQ and defer
2255*4882a593Smuzhiyun 		 * freeing rcv buffer till .defer_rcv callback
2256*4882a593Smuzhiyun 		 */
2257*4882a593Smuzhiyun 		qno = nvmebuf->idx;
2258*4882a593Smuzhiyun 		lpfc_post_rq_buffer(
2259*4882a593Smuzhiyun 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2260*4882a593Smuzhiyun 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2261*4882a593Smuzhiyun 		return;
2262*4882a593Smuzhiyun 	}
2263*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2264*4882a593Smuzhiyun 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2265*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2266*4882a593Smuzhiyun 			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2267*4882a593Smuzhiyun 			ctxp->oxid, rc,
2268*4882a593Smuzhiyun 			atomic_read(&tgtp->rcv_fcp_cmd_in),
2269*4882a593Smuzhiyun 			atomic_read(&tgtp->rcv_fcp_cmd_out),
2270*4882a593Smuzhiyun 			atomic_read(&tgtp->xmt_fcp_release));
2271*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2272*4882a593Smuzhiyun 			 ctxp->oxid, ctxp->size, ctxp->sid);
2273*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, iflags);
2274*4882a593Smuzhiyun 	lpfc_nvmet_defer_release(phba, ctxp);
2275*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2276*4882a593Smuzhiyun 	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2277*4882a593Smuzhiyun #endif
2278*4882a593Smuzhiyun }
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun static void
2281*4882a593Smuzhiyun lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2282*4882a593Smuzhiyun {
2283*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2284*4882a593Smuzhiyun 	struct lpfc_nvmet_ctxbuf *ctx_buf =
2285*4882a593Smuzhiyun 		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2288*4882a593Smuzhiyun #endif
2289*4882a593Smuzhiyun }
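/*
 * Editorial note, not driver code: the defer_work handler above uses the
 * standard container_of() + workqueue idiom.  The sketch below restates it
 * with a hypothetical struct example_ctx; INIT_WORK(), queue_work() and
 * container_of() are standard kernel APIs from <linux/workqueue.h>.
 */
#if 0
struct example_ctx {
	struct work_struct defer_work;
	/* ... per-I/O state ... */
};

static void example_defer_fn(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, defer_work);

	/* process ctx outside the CQ/ISR context */
}

static bool example_defer(struct workqueue_struct *wq, struct example_ctx *ctx)
{
	INIT_WORK(&ctx->defer_work, example_defer_fn);

	/* false means the work was already pending (cf. the 6325 path) */
	return queue_work(wq, &ctx->defer_work);
}
#endif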
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun static struct lpfc_nvmet_ctxbuf *
2292*4882a593Smuzhiyun lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2293*4882a593Smuzhiyun 			     struct lpfc_nvmet_ctx_info *current_infop)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2296*4882a593Smuzhiyun 	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2297*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *get_infop;
2298*4882a593Smuzhiyun 	int i;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	/*
2301*4882a593Smuzhiyun 	 * The current_infop for the MRQ an NVME command IU was received
2302*4882a593Smuzhiyun 	 * on is empty. Our goal is to replenish this MRQ's context
2303*4882a593Smuzhiyun 	 * list from another CPU.
2304*4882a593Smuzhiyun 	 *
2305*4882a593Smuzhiyun 	 * First we need to pick a context list to start looking at.
2306*4882a593Smuzhiyun 	 * nvmet_ctx_start_cpu had an available context the last time
2307*4882a593Smuzhiyun 	 * we needed to replenish this CPU, while nvmet_ctx_next_cpu
2308*4882a593Smuzhiyun 	 * is just the next sequential CPU for this MRQ.
2309*4882a593Smuzhiyun 	 */
2310*4882a593Smuzhiyun 	if (current_infop->nvmet_ctx_start_cpu)
2311*4882a593Smuzhiyun 		get_infop = current_infop->nvmet_ctx_start_cpu;
2312*4882a593Smuzhiyun 	else
2313*4882a593Smuzhiyun 		get_infop = current_infop->nvmet_ctx_next_cpu;
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2316*4882a593Smuzhiyun 		if (get_infop == current_infop) {
2317*4882a593Smuzhiyun 			get_infop = get_infop->nvmet_ctx_next_cpu;
2318*4882a593Smuzhiyun 			continue;
2319*4882a593Smuzhiyun 		}
2320*4882a593Smuzhiyun 		spin_lock(&get_infop->nvmet_ctx_list_lock);
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 		/* Just take the entire context list, if there are any */
2323*4882a593Smuzhiyun 		if (get_infop->nvmet_ctx_list_cnt) {
2324*4882a593Smuzhiyun 			list_splice_init(&get_infop->nvmet_ctx_list,
2325*4882a593Smuzhiyun 				    &current_infop->nvmet_ctx_list);
2326*4882a593Smuzhiyun 			current_infop->nvmet_ctx_list_cnt =
2327*4882a593Smuzhiyun 				get_infop->nvmet_ctx_list_cnt - 1;
2328*4882a593Smuzhiyun 			get_infop->nvmet_ctx_list_cnt = 0;
2329*4882a593Smuzhiyun 			spin_unlock(&get_infop->nvmet_ctx_list_lock);
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 			current_infop->nvmet_ctx_start_cpu = get_infop;
2332*4882a593Smuzhiyun 			list_remove_head(&current_infop->nvmet_ctx_list,
2333*4882a593Smuzhiyun 					 ctx_buf, struct lpfc_nvmet_ctxbuf,
2334*4882a593Smuzhiyun 					 list);
2335*4882a593Smuzhiyun 			return ctx_buf;
2336*4882a593Smuzhiyun 		}
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 		/* Otherwise, move on to the next CPU for this MRQ */
2339*4882a593Smuzhiyun 		spin_unlock(&get_infop->nvmet_ctx_list_lock);
2340*4882a593Smuzhiyun 		get_infop = get_infop->nvmet_ctx_next_cpu;
2341*4882a593Smuzhiyun 	}
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun #endif
2344*4882a593Smuzhiyun 	/* Nothing found, all contexts for the MRQ are in-flight */
2345*4882a593Smuzhiyun 	return NULL;
2346*4882a593Smuzhiyun }
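/*
 * Editorial note, not driver code: the replenish routine above steals a
 * whole free list from another CPU with list_splice_init() under that
 * CPU's lock.  The sketch below shows the same idiom with a hypothetical
 * struct example_pool; list_splice_init() and spin_lock()/spin_unlock()
 * are standard kernel APIs.
 */
#if 0
struct example_pool {
	spinlock_t lock;
	struct list_head free_list;
	int cnt;
};

/* caller already holds dst->lock, as in the function above */
static int example_steal(struct example_pool *dst, struct example_pool *src)
{
	int moved;

	spin_lock(&src->lock);
	moved = src->cnt;
	list_splice_init(&src->free_list, &dst->free_list);
	src->cnt = 0;
	spin_unlock(&src->lock);

	return moved;	/* number of contexts now owned by dst */
}
#endif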
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun /**
2349*4882a593Smuzhiyun  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2350*4882a593Smuzhiyun  * @phba: pointer to lpfc hba data structure.
2351*4882a593Smuzhiyun  * @idx: relative index of MRQ vector
2352*4882a593Smuzhiyun  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2353*4882a593Smuzhiyun  * @isr_timestamp: in jiffies.
2354*4882a593Smuzhiyun  * @cqflag: cq processing information regarding workload.
2355*4882a593Smuzhiyun  *
2356*4882a593Smuzhiyun  * This routine is used for processing the WQE associated with an unsolicited
2357*4882a593Smuzhiyun  * NVME FCP command. It allocates an exchange context buffer from the per-CPU
2358*4882a593Smuzhiyun  * context list for this MRQ (replenishing it from another CPU, or queueing
2359*4882a593Smuzhiyun  * the command for later processing if no context is available), initializes
2360*4882a593Smuzhiyun  * the exchange context from the received frame, and then processes the
2361*4882a593Smuzhiyun  * command immediately or defers it to a work queue based on @cqflag.
2362*4882a593Smuzhiyun  **/
2363*4882a593Smuzhiyun static void
2364*4882a593Smuzhiyun lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2365*4882a593Smuzhiyun 			    uint32_t idx,
2366*4882a593Smuzhiyun 			    struct rqb_dmabuf *nvmebuf,
2367*4882a593Smuzhiyun 			    uint64_t isr_timestamp,
2368*4882a593Smuzhiyun 			    uint8_t cqflag)
2369*4882a593Smuzhiyun {
2370*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
2371*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
2372*4882a593Smuzhiyun 	struct fc_frame_header *fc_hdr;
2373*4882a593Smuzhiyun 	struct lpfc_nvmet_ctxbuf *ctx_buf;
2374*4882a593Smuzhiyun 	struct lpfc_nvmet_ctx_info *current_infop;
2375*4882a593Smuzhiyun 	uint32_t size, oxid, sid, qno;
2376*4882a593Smuzhiyun 	unsigned long iflag;
2377*4882a593Smuzhiyun 	int current_cpu;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2380*4882a593Smuzhiyun 		return;
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	ctx_buf = NULL;
2383*4882a593Smuzhiyun 	if (!nvmebuf || !phba->targetport) {
2384*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2385*4882a593Smuzhiyun 				"6157 NVMET FCP Drop IO\n");
2386*4882a593Smuzhiyun 		if (nvmebuf)
2387*4882a593Smuzhiyun 			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2388*4882a593Smuzhiyun 		return;
2389*4882a593Smuzhiyun 	}
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	/*
2392*4882a593Smuzhiyun 	 * Get a pointer to the context list for this MRQ based on
2393*4882a593Smuzhiyun 	 * the CPU this MRQ IRQ is associated with. If the CPU association
2394*4882a593Smuzhiyun 	 * changes from our initial assumption, the context list could
2395*4882a593Smuzhiyun 	 * be empty, thus it would need to be replenished with the
2396*4882a593Smuzhiyun 	 * context list from another CPU for this MRQ.
2397*4882a593Smuzhiyun 	 */
2398*4882a593Smuzhiyun 	current_cpu = raw_smp_processor_id();
2399*4882a593Smuzhiyun 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2400*4882a593Smuzhiyun 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2401*4882a593Smuzhiyun 	if (current_infop->nvmet_ctx_list_cnt) {
2402*4882a593Smuzhiyun 		list_remove_head(&current_infop->nvmet_ctx_list,
2403*4882a593Smuzhiyun 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2404*4882a593Smuzhiyun 		current_infop->nvmet_ctx_list_cnt--;
2405*4882a593Smuzhiyun 	} else {
2406*4882a593Smuzhiyun 		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2407*4882a593Smuzhiyun 	}
2408*4882a593Smuzhiyun 	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2411*4882a593Smuzhiyun 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2412*4882a593Smuzhiyun 	size = nvmebuf->bytes_recv;
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2415*4882a593Smuzhiyun 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2416*4882a593Smuzhiyun 		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2417*4882a593Smuzhiyun 		if (idx != current_cpu)
2418*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2419*4882a593Smuzhiyun 					"6703 CPU Check rcv: "
2420*4882a593Smuzhiyun 					"cpu %d expect %d\n",
2421*4882a593Smuzhiyun 					current_cpu, idx);
2422*4882a593Smuzhiyun 	}
2423*4882a593Smuzhiyun #endif
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2426*4882a593Smuzhiyun 			 oxid, size, raw_smp_processor_id());
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	if (!ctx_buf) {
2431*4882a593Smuzhiyun 		/* Queue this NVME IO to process later */
2432*4882a593Smuzhiyun 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2433*4882a593Smuzhiyun 		list_add_tail(&nvmebuf->hbuf.list,
2434*4882a593Smuzhiyun 			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2435*4882a593Smuzhiyun 		phba->sli4_hba.nvmet_io_wait_cnt++;
2436*4882a593Smuzhiyun 		phba->sli4_hba.nvmet_io_wait_total++;
2437*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2438*4882a593Smuzhiyun 				       iflag);
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 		/* Post a brand new DMA buffer to RQ */
2441*4882a593Smuzhiyun 		qno = nvmebuf->idx;
2442*4882a593Smuzhiyun 		lpfc_post_rq_buffer(
2443*4882a593Smuzhiyun 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2444*4882a593Smuzhiyun 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 		atomic_inc(&tgtp->defer_ctx);
2447*4882a593Smuzhiyun 		return;
2448*4882a593Smuzhiyun 	}
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 	sid = sli4_sid_from_fc_hdr(fc_hdr);
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2453*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2454*4882a593Smuzhiyun 	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2455*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2456*4882a593Smuzhiyun 	if (ctxp->state != LPFC_NVME_STE_FREE) {
2457*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2458*4882a593Smuzhiyun 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
2459*4882a593Smuzhiyun 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2460*4882a593Smuzhiyun 	}
2461*4882a593Smuzhiyun 	ctxp->wqeq = NULL;
2462*4882a593Smuzhiyun 	ctxp->offset = 0;
2463*4882a593Smuzhiyun 	ctxp->phba = phba;
2464*4882a593Smuzhiyun 	ctxp->size = size;
2465*4882a593Smuzhiyun 	ctxp->oxid = oxid;
2466*4882a593Smuzhiyun 	ctxp->sid = sid;
2467*4882a593Smuzhiyun 	ctxp->idx = idx;
2468*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_RCV;
2469*4882a593Smuzhiyun 	ctxp->entry_cnt = 1;
2470*4882a593Smuzhiyun 	ctxp->flag = 0;
2471*4882a593Smuzhiyun 	ctxp->ctxbuf = ctx_buf;
2472*4882a593Smuzhiyun 	ctxp->rqb_buffer = (void *)nvmebuf;
2473*4882a593Smuzhiyun 	ctxp->hdwq = NULL;
2474*4882a593Smuzhiyun 	spin_lock_init(&ctxp->ctxlock);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2477*4882a593Smuzhiyun 	if (isr_timestamp)
2478*4882a593Smuzhiyun 		ctxp->ts_isr_cmd = isr_timestamp;
2479*4882a593Smuzhiyun 	ctxp->ts_cmd_nvme = 0;
2480*4882a593Smuzhiyun 	ctxp->ts_nvme_data = 0;
2481*4882a593Smuzhiyun 	ctxp->ts_data_wqput = 0;
2482*4882a593Smuzhiyun 	ctxp->ts_isr_data = 0;
2483*4882a593Smuzhiyun 	ctxp->ts_data_nvme = 0;
2484*4882a593Smuzhiyun 	ctxp->ts_nvme_status = 0;
2485*4882a593Smuzhiyun 	ctxp->ts_status_wqput = 0;
2486*4882a593Smuzhiyun 	ctxp->ts_isr_status = 0;
2487*4882a593Smuzhiyun 	ctxp->ts_status_nvme = 0;
2488*4882a593Smuzhiyun #endif
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
2491*4882a593Smuzhiyun 	/* check for cq processing load */
2492*4882a593Smuzhiyun 	if (!cqflag) {
2493*4882a593Smuzhiyun 		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2494*4882a593Smuzhiyun 		return;
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2498*4882a593Smuzhiyun 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2499*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2500*4882a593Smuzhiyun 				"6325 Unable to queue work for oxid x%x. "
2501*4882a593Smuzhiyun 				"FCP Drop IO [x%x x%x x%x]\n",
2502*4882a593Smuzhiyun 				ctxp->oxid,
2503*4882a593Smuzhiyun 				atomic_read(&tgtp->rcv_fcp_cmd_in),
2504*4882a593Smuzhiyun 				atomic_read(&tgtp->rcv_fcp_cmd_out),
2505*4882a593Smuzhiyun 				atomic_read(&tgtp->xmt_fcp_release));
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
2508*4882a593Smuzhiyun 		lpfc_nvmet_defer_release(phba, ctxp);
2509*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2510*4882a593Smuzhiyun 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2511*4882a593Smuzhiyun 	}
2512*4882a593Smuzhiyun }
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun /**
2515*4882a593Smuzhiyun  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2516*4882a593Smuzhiyun  * @phba: pointer to lpfc hba data structure.
2517*4882a593Smuzhiyun  * @idx: relative index of MRQ vector
2518*4882a593Smuzhiyun  * @nvmebuf: pointer to received nvme data structure.
2519*4882a593Smuzhiyun  * @isr_timestamp: in jiffies.
2520*4882a593Smuzhiyun  * @cqflag: cq processing information regarding workload.
2521*4882a593Smuzhiyun  *
2522*4882a593Smuzhiyun  * This routine is used to process an unsolicited event received from a SLI
2523*4882a593Smuzhiyun  * (Service Level Interface) ring. The actual processing of the data buffer
2524*4882a593Smuzhiyun  * associated with the unsolicited event is done by invoking the routine
2525*4882a593Smuzhiyun  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2526*4882a593Smuzhiyun  * SLI RQ on which the unsolicited event was received.
2527*4882a593Smuzhiyun  **/
2528*4882a593Smuzhiyun void
2529*4882a593Smuzhiyun lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2530*4882a593Smuzhiyun 			   uint32_t idx,
2531*4882a593Smuzhiyun 			   struct rqb_dmabuf *nvmebuf,
2532*4882a593Smuzhiyun 			   uint64_t isr_timestamp,
2533*4882a593Smuzhiyun 			   uint8_t cqflag)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	if (!nvmebuf) {
2536*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2537*4882a593Smuzhiyun 				"3167 NVMET FCP Drop IO\n");
2538*4882a593Smuzhiyun 		return;
2539*4882a593Smuzhiyun 	}
2540*4882a593Smuzhiyun 	if (phba->nvmet_support == 0) {
2541*4882a593Smuzhiyun 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2542*4882a593Smuzhiyun 		return;
2543*4882a593Smuzhiyun 	}
2544*4882a593Smuzhiyun 	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2545*4882a593Smuzhiyun }
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun /**
2548*4882a593Smuzhiyun  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2549*4882a593Smuzhiyun  * @phba: pointer to a host N_Port data structure.
2550*4882a593Smuzhiyun  * @ctxp: Context info for NVME LS Request
2551*4882a593Smuzhiyun  * @rspbuf: DMA address of the NVME LS response payload.
2552*4882a593Smuzhiyun  * @rspsize: size of the NVME LS response payload.
2553*4882a593Smuzhiyun  *
2554*4882a593Smuzhiyun  * This routine is used for allocating an lpfc-WQE data structure from
2555*4882a593Smuzhiyun  * the driver lpfc-WQE free-list and preparing the WQE with the parameters
2556*4882a593Smuzhiyun  * passed into the routine, in order to transmit an NVME Link Service (LS)
2557*4882a593Smuzhiyun  * response on the exchange described by @ctxp. It builds an
2558*4882a593Smuzhiyun  * XMIT_SEQUENCE64 WQE: the Buffer Descriptor Entry (BDE) is pointed at
2559*4882a593Smuzhiyun  * the LS response payload (@rspbuf and @rspsize), and the common WQE
2560*4882a593Smuzhiyun  * words (RPI, XRI, request tag and received OX_ID) are filled in from
2561*4882a593Smuzhiyun  * the exchange context and the remote node. The reference count on the
2562*4882a593Smuzhiyun  * ndlp is incremented by 1 and the reference to the ndlp is put into
2563*4882a593Smuzhiyun  * context1 of the WQE data structure for this WQE to hold the ndlp
2564*4882a593Smuzhiyun  * reference for the command's callback function to access later.
2567*4882a593Smuzhiyun  *
2568*4882a593Smuzhiyun  * Return code
2569*4882a593Smuzhiyun  *   Pointer to the newly allocated/prepared nvme wqe data structure
2570*4882a593Smuzhiyun  *   NULL - when nvme wqe data structure allocation/preparation failed
2571*4882a593Smuzhiyun  **/
2572*4882a593Smuzhiyun static struct lpfc_iocbq *
2573*4882a593Smuzhiyun lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2574*4882a593Smuzhiyun 		       struct lpfc_async_xchg_ctx *ctxp,
2575*4882a593Smuzhiyun 		       dma_addr_t rspbuf, uint16_t rspsize)
2576*4882a593Smuzhiyun {
2577*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
2578*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqe;
2579*4882a593Smuzhiyun 	union lpfc_wqe128 *wqe;
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	if (!lpfc_is_link_up(phba)) {
2582*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2583*4882a593Smuzhiyun 				"6104 NVMET prep LS wqe: link err: "
2584*4882a593Smuzhiyun 				"NPORT x%x oxid:x%x ste %d\n",
2585*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state);
2586*4882a593Smuzhiyun 		return NULL;
2587*4882a593Smuzhiyun 	}
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	/* Allocate buffer for  command wqe */
2590*4882a593Smuzhiyun 	nvmewqe = lpfc_sli_get_iocbq(phba);
2591*4882a593Smuzhiyun 	if (nvmewqe == NULL) {
2592*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2593*4882a593Smuzhiyun 				"6105 NVMET prep LS wqe: No WQE: "
2594*4882a593Smuzhiyun 				"NPORT x%x oxid x%x ste %d\n",
2595*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state);
2596*4882a593Smuzhiyun 		return NULL;
2597*4882a593Smuzhiyun 	}
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2600*4882a593Smuzhiyun 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2601*4882a593Smuzhiyun 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2602*4882a593Smuzhiyun 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2603*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2604*4882a593Smuzhiyun 				"6106 NVMET prep LS wqe: No ndlp: "
2605*4882a593Smuzhiyun 				"NPORT x%x oxid x%x ste %d\n",
2606*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state);
2607*4882a593Smuzhiyun 		goto nvme_wqe_free_wqeq_exit;
2608*4882a593Smuzhiyun 	}
2609*4882a593Smuzhiyun 	ctxp->wqeq = nvmewqe;
2610*4882a593Smuzhiyun 
2611*4882a593Smuzhiyun 	/* prevent preparing wqe with NULL ndlp reference */
2612*4882a593Smuzhiyun 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
2613*4882a593Smuzhiyun 	if (nvmewqe->context1 == NULL)
2614*4882a593Smuzhiyun 		goto nvme_wqe_free_wqeq_exit;
2615*4882a593Smuzhiyun 	nvmewqe->context2 = ctxp;
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun 	wqe = &nvmewqe->wqe;
2618*4882a593Smuzhiyun 	memset(wqe, 0, sizeof(union lpfc_wqe));
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	/* Words 0 - 2 */
2621*4882a593Smuzhiyun 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2622*4882a593Smuzhiyun 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2623*4882a593Smuzhiyun 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2624*4882a593Smuzhiyun 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun 	/* Word 3 */
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun 	/* Word 4 */
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	/* Word 5 */
2631*4882a593Smuzhiyun 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2632*4882a593Smuzhiyun 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2633*4882a593Smuzhiyun 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2634*4882a593Smuzhiyun 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2635*4882a593Smuzhiyun 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2636*4882a593Smuzhiyun 
2637*4882a593Smuzhiyun 	/* Word 6 */
2638*4882a593Smuzhiyun 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2639*4882a593Smuzhiyun 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2640*4882a593Smuzhiyun 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	/* Word 7 */
2643*4882a593Smuzhiyun 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2644*4882a593Smuzhiyun 	       CMD_XMIT_SEQUENCE64_WQE);
2645*4882a593Smuzhiyun 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2646*4882a593Smuzhiyun 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2647*4882a593Smuzhiyun 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	/* Word 8 */
2650*4882a593Smuzhiyun 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2651*4882a593Smuzhiyun 
2652*4882a593Smuzhiyun 	/* Word 9 */
2653*4882a593Smuzhiyun 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2654*4882a593Smuzhiyun 	/* Needs to be set by caller */
2655*4882a593Smuzhiyun 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	/* Word 10 */
2658*4882a593Smuzhiyun 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2659*4882a593Smuzhiyun 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2660*4882a593Smuzhiyun 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2661*4882a593Smuzhiyun 	       LPFC_WQE_LENLOC_WORD12);
2662*4882a593Smuzhiyun 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2663*4882a593Smuzhiyun 
2664*4882a593Smuzhiyun 	/* Word 11 */
2665*4882a593Smuzhiyun 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2666*4882a593Smuzhiyun 	       LPFC_WQE_CQ_ID_DEFAULT);
2667*4882a593Smuzhiyun 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2668*4882a593Smuzhiyun 	       OTHER_COMMAND);
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun 	/* Word 12 */
2671*4882a593Smuzhiyun 	wqe->xmit_sequence.xmit_len = rspsize;
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun 	nvmewqe->retry = 1;
2674*4882a593Smuzhiyun 	nvmewqe->vport = phba->pport;
2675*4882a593Smuzhiyun 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2676*4882a593Smuzhiyun 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2677*4882a593Smuzhiyun 
2678*4882a593Smuzhiyun 	/* Xmit NVMET response to remote NPORT <did> */
2679*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2680*4882a593Smuzhiyun 			"6039 Xmit NVMET LS response to remote "
2681*4882a593Smuzhiyun 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2682*4882a593Smuzhiyun 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2683*4882a593Smuzhiyun 			rspsize);
2684*4882a593Smuzhiyun 	return nvmewqe;
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun nvme_wqe_free_wqeq_exit:
2687*4882a593Smuzhiyun 	nvmewqe->context2 = NULL;
2688*4882a593Smuzhiyun 	nvmewqe->context3 = NULL;
2689*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, nvmewqe);
2690*4882a593Smuzhiyun 	return NULL;
2691*4882a593Smuzhiyun }
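/*
 * Editorial note, not driver code: the BDE setup in the routine above
 * splits a 64-bit DMA address into low/high 32-bit words.  The sketch
 * below expresses the same idea with the generic lower_32_bits()/
 * upper_32_bits() helpers; struct example_bde is a hypothetical stand-in
 * for the hardware descriptor.
 */
#if 0
struct example_bde {
	__le32 addr_lo;
	__le32 addr_hi;
	__le32 len;
};

static void example_fill_bde(struct example_bde *bde, dma_addr_t pa, u32 len)
{
	bde->addr_lo = cpu_to_le32(lower_32_bits(pa));
	bde->addr_hi = cpu_to_le32(upper_32_bits(pa));
	bde->len = cpu_to_le32(len);
}
#endif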
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun static struct lpfc_iocbq *
2695*4882a593Smuzhiyun lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2696*4882a593Smuzhiyun 			struct lpfc_async_xchg_ctx *ctxp)
2697*4882a593Smuzhiyun {
2698*4882a593Smuzhiyun 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2699*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
2700*4882a593Smuzhiyun 	struct sli4_sge *sgl;
2701*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
2702*4882a593Smuzhiyun 	struct lpfc_iocbq *nvmewqe;
2703*4882a593Smuzhiyun 	struct scatterlist *sgel;
2704*4882a593Smuzhiyun 	union lpfc_wqe128 *wqe;
2705*4882a593Smuzhiyun 	struct ulp_bde64 *bde;
2706*4882a593Smuzhiyun 	dma_addr_t physaddr;
2707*4882a593Smuzhiyun 	int i, cnt, nsegs;
2708*4882a593Smuzhiyun 	int do_pbde;
2709*4882a593Smuzhiyun 	int xc = 1;
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 	if (!lpfc_is_link_up(phba)) {
2712*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2713*4882a593Smuzhiyun 				"6107 NVMET prep FCP wqe: link err:"
2714*4882a593Smuzhiyun 				"NPORT x%x oxid x%x ste %d\n",
2715*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state);
2716*4882a593Smuzhiyun 		return NULL;
2717*4882a593Smuzhiyun 	}
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2720*4882a593Smuzhiyun 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2721*4882a593Smuzhiyun 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2722*4882a593Smuzhiyun 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2723*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2724*4882a593Smuzhiyun 				"6108 NVMET prep FCP wqe: no ndlp: "
2725*4882a593Smuzhiyun 				"NPORT x%x oxid x%x ste %d\n",
2726*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state);
2727*4882a593Smuzhiyun 		return NULL;
2728*4882a593Smuzhiyun 	}
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2731*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2732*4882a593Smuzhiyun 				"6109 NVMET prep FCP wqe: seg cnt err: "
2733*4882a593Smuzhiyun 				"NPORT x%x oxid x%x ste %d cnt %d\n",
2734*4882a593Smuzhiyun 				ctxp->sid, ctxp->oxid, ctxp->state,
2735*4882a593Smuzhiyun 				phba->cfg_nvme_seg_cnt);
2736*4882a593Smuzhiyun 		return NULL;
2737*4882a593Smuzhiyun 	}
2738*4882a593Smuzhiyun 	nsegs = rsp->sg_cnt;
2739*4882a593Smuzhiyun 
2740*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2741*4882a593Smuzhiyun 	nvmewqe = ctxp->wqeq;
2742*4882a593Smuzhiyun 	if (nvmewqe == NULL) {
2743*4882a593Smuzhiyun 		/* Allocate buffer for  command wqe */
2744*4882a593Smuzhiyun 		nvmewqe = ctxp->ctxbuf->iocbq;
2745*4882a593Smuzhiyun 		if (nvmewqe == NULL) {
2746*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2747*4882a593Smuzhiyun 					"6110 NVMET prep FCP wqe: No "
2748*4882a593Smuzhiyun 					"WQE: NPORT x%x oxid x%x ste %d\n",
2749*4882a593Smuzhiyun 					ctxp->sid, ctxp->oxid, ctxp->state);
2750*4882a593Smuzhiyun 			return NULL;
2751*4882a593Smuzhiyun 		}
2752*4882a593Smuzhiyun 		ctxp->wqeq = nvmewqe;
2753*4882a593Smuzhiyun 		xc = 0; /* create new XRI */
2754*4882a593Smuzhiyun 		nvmewqe->sli4_lxritag = NO_XRI;
2755*4882a593Smuzhiyun 		nvmewqe->sli4_xritag = NO_XRI;
2756*4882a593Smuzhiyun 	}
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 	/* Sanity check */
2759*4882a593Smuzhiyun 	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2760*4882a593Smuzhiyun 	    (ctxp->entry_cnt == 1)) ||
2761*4882a593Smuzhiyun 	    (ctxp->state == LPFC_NVME_STE_DATA)) {
2762*4882a593Smuzhiyun 		wqe = &nvmewqe->wqe;
2763*4882a593Smuzhiyun 	} else {
2764*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2765*4882a593Smuzhiyun 				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
2766*4882a593Smuzhiyun 				ctxp->state, ctxp->entry_cnt);
2767*4882a593Smuzhiyun 		return NULL;
2768*4882a593Smuzhiyun 	}
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2771*4882a593Smuzhiyun 	switch (rsp->op) {
2772*4882a593Smuzhiyun 	case NVMET_FCOP_READDATA:
2773*4882a593Smuzhiyun 	case NVMET_FCOP_READDATA_RSP:
2774*4882a593Smuzhiyun 		/* From the tsend template, initialize words 7 - 11 */
2775*4882a593Smuzhiyun 		memcpy(&wqe->words[7],
2776*4882a593Smuzhiyun 		       &lpfc_tsend_cmd_template.words[7],
2777*4882a593Smuzhiyun 		       sizeof(uint32_t) * 5);
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 		/* Words 0 - 2 : The first sg segment */
2780*4882a593Smuzhiyun 		sgel = &rsp->sg[0];
2781*4882a593Smuzhiyun 		physaddr = sg_dma_address(sgel);
2782*4882a593Smuzhiyun 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2783*4882a593Smuzhiyun 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2784*4882a593Smuzhiyun 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2785*4882a593Smuzhiyun 		wqe->fcp_tsend.bde.addrHigh =
2786*4882a593Smuzhiyun 			cpu_to_le32(putPaddrHigh(physaddr));
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 		/* Word 3 */
2789*4882a593Smuzhiyun 		wqe->fcp_tsend.payload_offset_len = 0;
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 		/* Word 4 */
2792*4882a593Smuzhiyun 		wqe->fcp_tsend.relative_offset = ctxp->offset;
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun 		/* Word 5 */
2795*4882a593Smuzhiyun 		wqe->fcp_tsend.reserved = 0;
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 		/* Word 6 */
2798*4882a593Smuzhiyun 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2799*4882a593Smuzhiyun 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2800*4882a593Smuzhiyun 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2801*4882a593Smuzhiyun 		       nvmewqe->sli4_xritag);
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 		/* Word 7 - set ar later */
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 		/* Word 8 */
2806*4882a593Smuzhiyun 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 		/* Word 9 */
2809*4882a593Smuzhiyun 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2810*4882a593Smuzhiyun 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun 		/* Word 10 - set wqes later, in template xc=1 */
2813*4882a593Smuzhiyun 		if (!xc)
2814*4882a593Smuzhiyun 			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 		/* Word 11 - set sup, irsp, irsplen later */
2817*4882a593Smuzhiyun 		do_pbde = 0;
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 		/* Word 12 */
2820*4882a593Smuzhiyun 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2821*4882a593Smuzhiyun 
2822*4882a593Smuzhiyun 		/* Setup 2 SKIP SGEs */
2823*4882a593Smuzhiyun 		sgl->addr_hi = 0;
2824*4882a593Smuzhiyun 		sgl->addr_lo = 0;
2825*4882a593Smuzhiyun 		sgl->word2 = 0;
2826*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2827*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
2828*4882a593Smuzhiyun 		sgl->sge_len = 0;
2829*4882a593Smuzhiyun 		sgl++;
2830*4882a593Smuzhiyun 		sgl->addr_hi = 0;
2831*4882a593Smuzhiyun 		sgl->addr_lo = 0;
2832*4882a593Smuzhiyun 		sgl->word2 = 0;
2833*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2834*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
2835*4882a593Smuzhiyun 		sgl->sge_len = 0;
2836*4882a593Smuzhiyun 		sgl++;
2837*4882a593Smuzhiyun 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2838*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2843*4882a593Smuzhiyun 				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2844*4882a593Smuzhiyun 					bf_set(wqe_sup,
2845*4882a593Smuzhiyun 					       &wqe->fcp_tsend.wqe_com, 1);
2846*4882a593Smuzhiyun 			} else {
2847*4882a593Smuzhiyun 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2848*4882a593Smuzhiyun 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2849*4882a593Smuzhiyun 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2850*4882a593Smuzhiyun 				       ((rsp->rsplen >> 2) - 1));
2851*4882a593Smuzhiyun 				memcpy(&wqe->words[16], rsp->rspaddr,
2852*4882a593Smuzhiyun 				       rsp->rsplen);
2853*4882a593Smuzhiyun 			}
2854*4882a593Smuzhiyun 		} else {
2855*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_fcp_read);
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2858*4882a593Smuzhiyun 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2859*4882a593Smuzhiyun 		}
2860*4882a593Smuzhiyun 		break;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	case NVMET_FCOP_WRITEDATA:
2863*4882a593Smuzhiyun 		/* From the treceive template, initialize words 3 - 11 */
2864*4882a593Smuzhiyun 		memcpy(&wqe->words[3],
2865*4882a593Smuzhiyun 		       &lpfc_treceive_cmd_template.words[3],
2866*4882a593Smuzhiyun 		       sizeof(uint32_t) * 9);
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2869*4882a593Smuzhiyun 		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2870*4882a593Smuzhiyun 		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2871*4882a593Smuzhiyun 		wqe->fcp_treceive.bde.addrLow = 0;
2872*4882a593Smuzhiyun 		wqe->fcp_treceive.bde.addrHigh = 0;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 		/* Word 4 */
2875*4882a593Smuzhiyun 		wqe->fcp_treceive.relative_offset = ctxp->offset;
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 		/* Word 6 */
2878*4882a593Smuzhiyun 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2879*4882a593Smuzhiyun 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2880*4882a593Smuzhiyun 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2881*4882a593Smuzhiyun 		       nvmewqe->sli4_xritag);
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 		/* Word 7 */
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 		/* Word 8 */
2886*4882a593Smuzhiyun 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2887*4882a593Smuzhiyun 
2888*4882a593Smuzhiyun 		/* Word 9 */
2889*4882a593Smuzhiyun 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2890*4882a593Smuzhiyun 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 		/* Word 10 - in template xc=1 */
2893*4882a593Smuzhiyun 		if (!xc)
2894*4882a593Smuzhiyun 			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 		/* Word 11 - set pbde later */
2897*4882a593Smuzhiyun 		if (phba->cfg_enable_pbde) {
2898*4882a593Smuzhiyun 			do_pbde = 1;
2899*4882a593Smuzhiyun 		} else {
2900*4882a593Smuzhiyun 			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2901*4882a593Smuzhiyun 			do_pbde = 0;
2902*4882a593Smuzhiyun 		}
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun 		/* Word 12 */
2905*4882a593Smuzhiyun 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun 		/* Setup 2 SKIP SGEs */
2908*4882a593Smuzhiyun 		sgl->addr_hi = 0;
2909*4882a593Smuzhiyun 		sgl->addr_lo = 0;
2910*4882a593Smuzhiyun 		sgl->word2 = 0;
2911*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2912*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
2913*4882a593Smuzhiyun 		sgl->sge_len = 0;
2914*4882a593Smuzhiyun 		sgl++;
2915*4882a593Smuzhiyun 		sgl->addr_hi = 0;
2916*4882a593Smuzhiyun 		sgl->addr_lo = 0;
2917*4882a593Smuzhiyun 		sgl->word2 = 0;
2918*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2919*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
2920*4882a593Smuzhiyun 		sgl->sge_len = 0;
2921*4882a593Smuzhiyun 		sgl++;
2922*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_fcp_write);
2923*4882a593Smuzhiyun 		break;
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 	case NVMET_FCOP_RSP:
2926*4882a593Smuzhiyun 		/* From the treceive template, initialize words 4 - 11 */
2927*4882a593Smuzhiyun 		memcpy(&wqe->words[4],
2928*4882a593Smuzhiyun 		       &lpfc_trsp_cmd_template.words[4],
2929*4882a593Smuzhiyun 		       sizeof(uint32_t) * 8);
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun 		/* Words 0 - 2 */
2932*4882a593Smuzhiyun 		physaddr = rsp->rspdma;
2933*4882a593Smuzhiyun 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2934*4882a593Smuzhiyun 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2935*4882a593Smuzhiyun 		wqe->fcp_trsp.bde.addrLow =
2936*4882a593Smuzhiyun 			cpu_to_le32(putPaddrLow(physaddr));
2937*4882a593Smuzhiyun 		wqe->fcp_trsp.bde.addrHigh =
2938*4882a593Smuzhiyun 			cpu_to_le32(putPaddrHigh(physaddr));
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 		/* Word 3 */
2941*4882a593Smuzhiyun 		wqe->fcp_trsp.response_len = rsp->rsplen;
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 		/* Word 6 */
2944*4882a593Smuzhiyun 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2945*4882a593Smuzhiyun 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2946*4882a593Smuzhiyun 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2947*4882a593Smuzhiyun 		       nvmewqe->sli4_xritag);
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 		/* Word 7 */
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 		/* Word 8 */
2952*4882a593Smuzhiyun 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun 		/* Word 9 */
2955*4882a593Smuzhiyun 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2956*4882a593Smuzhiyun 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 		/* Word 10 */
2959*4882a593Smuzhiyun 		if (xc)
2960*4882a593Smuzhiyun 			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 		/* Word 11 */
2963*4882a593Smuzhiyun 		/* In template wqes=0 irsp=0 irsplen=0 - good response */
2964*4882a593Smuzhiyun 		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2965*4882a593Smuzhiyun 			/* Bad response - embed it */
2966*4882a593Smuzhiyun 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2967*4882a593Smuzhiyun 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2968*4882a593Smuzhiyun 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2969*4882a593Smuzhiyun 			       ((rsp->rsplen >> 2) - 1));
2970*4882a593Smuzhiyun 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2971*4882a593Smuzhiyun 		}
2972*4882a593Smuzhiyun 		do_pbde = 0;
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 		/* Word 12 */
2975*4882a593Smuzhiyun 		wqe->fcp_trsp.rsvd_12_15[0] = 0;
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 		/* Use rspbuf, NOT sg list */
2978*4882a593Smuzhiyun 		nsegs = 0;
2979*4882a593Smuzhiyun 		sgl->word2 = 0;
2980*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_fcp_rsp);
2981*4882a593Smuzhiyun 		break;
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	default:
2984*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2985*4882a593Smuzhiyun 				"6064 Unknown Rsp Op %d\n",
2986*4882a593Smuzhiyun 				rsp->op);
2987*4882a593Smuzhiyun 		return NULL;
2988*4882a593Smuzhiyun 	}
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun 	nvmewqe->retry = 1;
2991*4882a593Smuzhiyun 	nvmewqe->vport = phba->pport;
2992*4882a593Smuzhiyun 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2993*4882a593Smuzhiyun 	nvmewqe->context1 = ndlp;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	for_each_sg(rsp->sg, sgel, nsegs, i) {
2996*4882a593Smuzhiyun 		physaddr = sg_dma_address(sgel);
2997*4882a593Smuzhiyun 		cnt = sg_dma_len(sgel);
2998*4882a593Smuzhiyun 		sgl->addr_hi = putPaddrHigh(physaddr);
2999*4882a593Smuzhiyun 		sgl->addr_lo = putPaddrLow(physaddr);
3000*4882a593Smuzhiyun 		sgl->word2 = 0;
3001*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3002*4882a593Smuzhiyun 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3003*4882a593Smuzhiyun 		if ((i+1) == rsp->sg_cnt)
3004*4882a593Smuzhiyun 			bf_set(lpfc_sli4_sge_last, sgl, 1);
3005*4882a593Smuzhiyun 		sgl->word2 = cpu_to_le32(sgl->word2);
3006*4882a593Smuzhiyun 		sgl->sge_len = cpu_to_le32(cnt);
3007*4882a593Smuzhiyun 		if (i == 0) {
3008*4882a593Smuzhiyun 			bde = (struct ulp_bde64 *)&wqe->words[13];
3009*4882a593Smuzhiyun 			if (do_pbde) {
3010*4882a593Smuzhiyun 				/* Words 13-15  (PBDE) */
3011*4882a593Smuzhiyun 				bde->addrLow = sgl->addr_lo;
3012*4882a593Smuzhiyun 				bde->addrHigh = sgl->addr_hi;
3013*4882a593Smuzhiyun 				bde->tus.f.bdeSize =
3014*4882a593Smuzhiyun 					le32_to_cpu(sgl->sge_len);
3015*4882a593Smuzhiyun 				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3016*4882a593Smuzhiyun 				bde->tus.w = cpu_to_le32(bde->tus.w);
3017*4882a593Smuzhiyun 			} else {
3018*4882a593Smuzhiyun 				memset(bde, 0, sizeof(struct ulp_bde64));
3019*4882a593Smuzhiyun 			}
3020*4882a593Smuzhiyun 		}
3021*4882a593Smuzhiyun 		sgl++;
3022*4882a593Smuzhiyun 		ctxp->offset += cnt;
3023*4882a593Smuzhiyun 	}
3024*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_DATA;
3025*4882a593Smuzhiyun 	ctxp->entry_cnt++;
3026*4882a593Smuzhiyun 	return nvmewqe;
3027*4882a593Smuzhiyun }
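/*
 * Editorial note, not driver code: the SGL build loop above walks the
 * transport scatterlist with for_each_sg()/sg_dma_address()/sg_dma_len(),
 * all standard <linux/scatterlist.h> APIs.  example_walk_sgl() below is a
 * hypothetical restatement of that walk.
 */
#if 0
static u32 example_walk_sgl(struct scatterlist *sg, int nsegs)
{
	struct scatterlist *sgel;
	u32 offset = 0;
	int i;

	for_each_sg(sg, sgel, nsegs, i) {
		/* program one data SGE from sg_dma_address(sgel) and
		 * sg_dma_len(sgel); mark the last entry when i == nsegs - 1
		 */
		offset += sg_dma_len(sgel);
	}

	return offset;	/* running relative offset, like ctxp->offset above */
}
#endif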
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun /**
3030*4882a593Smuzhiyun  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3031*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3032*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
3033*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
3034*4882a593Smuzhiyun  *
3035*4882a593Smuzhiyun  * The function is called from the SLI ring event handler with no
3036*4882a593Smuzhiyun  * lock held. This function is the completion handler for NVME ABTS for FCP
3037*4882a593Smuzhiyun  * commands. The function frees memory resources used for the NVME commands.
3038*4882a593Smuzhiyun  **/
3039*4882a593Smuzhiyun static void
3040*4882a593Smuzhiyun lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3041*4882a593Smuzhiyun 			     struct lpfc_wcqe_complete *wcqe)
3042*4882a593Smuzhiyun {
3043*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
3044*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3045*4882a593Smuzhiyun 	uint32_t result;
3046*4882a593Smuzhiyun 	unsigned long flags;
3047*4882a593Smuzhiyun 	bool released = false;
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun 	ctxp = cmdwqe->context2;
3050*4882a593Smuzhiyun 	result = wcqe->parameter;
3051*4882a593Smuzhiyun 
3052*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3053*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_ABORT_OP)
3054*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3055*4882a593Smuzhiyun 
3056*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3057*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_DONE;
3058*4882a593Smuzhiyun 
3059*4882a593Smuzhiyun 	/* Check if we already received a free context call
3060*4882a593Smuzhiyun 	 * and we have completed processing an abort situation.
3061*4882a593Smuzhiyun 	 */
3062*4882a593Smuzhiyun 	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3063*4882a593Smuzhiyun 	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
3064*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3065*4882a593Smuzhiyun 		list_del_init(&ctxp->list);
3066*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3067*4882a593Smuzhiyun 		released = true;
3068*4882a593Smuzhiyun 	}
3069*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3070*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3071*4882a593Smuzhiyun 	atomic_inc(&tgtp->xmt_abort_rsp);
3072*4882a593Smuzhiyun 
3073*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3074*4882a593Smuzhiyun 			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3075*4882a593Smuzhiyun 			"WCQE: %08x %08x %08x %08x\n",
3076*4882a593Smuzhiyun 			ctxp->oxid, ctxp->flag, released,
3077*4882a593Smuzhiyun 			wcqe->word0, wcqe->total_data_placed,
3078*4882a593Smuzhiyun 			result, wcqe->word3);
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 	cmdwqe->context2 = NULL;
3081*4882a593Smuzhiyun 	cmdwqe->context3 = NULL;
3082*4882a593Smuzhiyun 	/*
3083*4882a593Smuzhiyun 	 * if transport has released ctx, then can reuse it. Otherwise,
3084*4882a593Smuzhiyun 	 * will be recycled by transport release call.
3085*4882a593Smuzhiyun 	 */
3086*4882a593Smuzhiyun 	if (released)
3087*4882a593Smuzhiyun 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3088*4882a593Smuzhiyun 
3089*4882a593Smuzhiyun 	/* This is the iocbq for the abort, not the command */
3090*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, cmdwqe);
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 	/* Since iaab/iaar are NOT set, there is no work left.
3093*4882a593Smuzhiyun 	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3094*4882a593Smuzhiyun 	 * should have been called already.
3095*4882a593Smuzhiyun 	 */
3096*4882a593Smuzhiyun }
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun /**
3099*4882a593Smuzhiyun  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3100*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3101*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
3102*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
3103*4882a593Smuzhiyun  *
3104*4882a593Smuzhiyun  * The function is called from the SLI ring event handler with no
3105*4882a593Smuzhiyun  * lock held. This function is the completion handler for NVME ABTS for FCP
3106*4882a593Smuzhiyun  * commands. The function frees memory resources used for the NVME commands.
3107*4882a593Smuzhiyun  **/
3108*4882a593Smuzhiyun static void
3109*4882a593Smuzhiyun lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3110*4882a593Smuzhiyun 			       struct lpfc_wcqe_complete *wcqe)
3111*4882a593Smuzhiyun {
3112*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
3113*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3114*4882a593Smuzhiyun 	unsigned long flags;
3115*4882a593Smuzhiyun 	uint32_t result;
3116*4882a593Smuzhiyun 	bool released = false;
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun 	ctxp = cmdwqe->context2;
3119*4882a593Smuzhiyun 	result = wcqe->parameter;
3120*4882a593Smuzhiyun 
3121*4882a593Smuzhiyun 	if (!ctxp) {
3122*4882a593Smuzhiyun 		/* if context is cleared, the related I/O has already completed */
3123*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3124*4882a593Smuzhiyun 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3125*4882a593Smuzhiyun 				wcqe->word0, wcqe->total_data_placed,
3126*4882a593Smuzhiyun 				result, wcqe->word3);
3127*4882a593Smuzhiyun 		return;
3128*4882a593Smuzhiyun 	}
3129*4882a593Smuzhiyun 
3130*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3131*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3132*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_ABORT_OP)
3133*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	/* Sanity check */
3136*4882a593Smuzhiyun 	if (ctxp->state != LPFC_NVME_STE_ABORT) {
3137*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3138*4882a593Smuzhiyun 				"6112 ABTS Wrong state:%d oxid x%x\n",
3139*4882a593Smuzhiyun 				ctxp->state, ctxp->oxid);
3140*4882a593Smuzhiyun 	}
3141*4882a593Smuzhiyun 
3142*4882a593Smuzhiyun 	/* Check if we already received a free context call
3143*4882a593Smuzhiyun 	 * and we have completed processing an abort situation.
3144*4882a593Smuzhiyun 	 */
3145*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_DONE;
3146*4882a593Smuzhiyun 	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3147*4882a593Smuzhiyun 	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
3148*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3149*4882a593Smuzhiyun 		list_del_init(&ctxp->list);
3150*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3151*4882a593Smuzhiyun 		released = true;
3152*4882a593Smuzhiyun 	}
3153*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3154*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3155*4882a593Smuzhiyun 	atomic_inc(&tgtp->xmt_abort_rsp);
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3158*4882a593Smuzhiyun 			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
3159*4882a593Smuzhiyun 			"WCQE: %08x %08x %08x %08x\n",
3160*4882a593Smuzhiyun 			ctxp->oxid, ctxp->flag, released,
3161*4882a593Smuzhiyun 			wcqe->word0, wcqe->total_data_placed,
3162*4882a593Smuzhiyun 			result, wcqe->word3);
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun 	cmdwqe->context2 = NULL;
3165*4882a593Smuzhiyun 	cmdwqe->context3 = NULL;
3166*4882a593Smuzhiyun 	/*
3167*4882a593Smuzhiyun 	 * If the transport has already released the ctx, it can be reused now.
3168*4882a593Smuzhiyun 	 * Otherwise it will be recycled by the transport release call.
3169*4882a593Smuzhiyun 	 */
3170*4882a593Smuzhiyun 	if (released)
3171*4882a593Smuzhiyun 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3172*4882a593Smuzhiyun 
3173*4882a593Smuzhiyun 	/* Since iaab/iaar are NOT set, there is no work left.
3174*4882a593Smuzhiyun 	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3175*4882a593Smuzhiyun 	 * should have been called already.
3176*4882a593Smuzhiyun 	 */
3177*4882a593Smuzhiyun }
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun /**
3180*4882a593Smuzhiyun  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3181*4882a593Smuzhiyun  * @phba: Pointer to HBA context object.
3182*4882a593Smuzhiyun  * @cmdwqe: Pointer to driver command WQE object.
3183*4882a593Smuzhiyun  * @wcqe: Pointer to driver response CQE object.
3184*4882a593Smuzhiyun  *
3185*4882a593Smuzhiyun  * This function is called from the SLI ring event handler with no
3186*4882a593Smuzhiyun  * lock held. It is the completion handler for an NVME ABTS of an LS
3187*4882a593Smuzhiyun  * command and frees the memory resources used for the command.
3188*4882a593Smuzhiyun  **/
3189*4882a593Smuzhiyun static void
3190*4882a593Smuzhiyun lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3191*4882a593Smuzhiyun 			    struct lpfc_wcqe_complete *wcqe)
3192*4882a593Smuzhiyun {
3193*4882a593Smuzhiyun 	struct lpfc_async_xchg_ctx *ctxp;
3194*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3195*4882a593Smuzhiyun 	uint32_t result;
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun 	ctxp = cmdwqe->context2;
3198*4882a593Smuzhiyun 	result = wcqe->parameter;
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun 	if (phba->nvmet_support) {
3201*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3202*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3203*4882a593Smuzhiyun 	}
3204*4882a593Smuzhiyun 
3205*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3206*4882a593Smuzhiyun 			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3207*4882a593Smuzhiyun 			ctxp, wcqe->word0, wcqe->total_data_placed,
3208*4882a593Smuzhiyun 			result, wcqe->word3);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	if (!ctxp) {
3211*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3212*4882a593Smuzhiyun 				"6415 NVMET LS Abort No ctx: WCQE: "
3213*4882a593Smuzhiyun 				 "%08x %08x %08x %08x\n",
3214*4882a593Smuzhiyun 				wcqe->word0, wcqe->total_data_placed,
3215*4882a593Smuzhiyun 				result, wcqe->word3);
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun 		lpfc_sli_release_iocbq(phba, cmdwqe);
3218*4882a593Smuzhiyun 		return;
3219*4882a593Smuzhiyun 	}
3220*4882a593Smuzhiyun 
3221*4882a593Smuzhiyun 	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3222*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3223*4882a593Smuzhiyun 				"6416 NVMET LS abort cmpl state mismatch: "
3224*4882a593Smuzhiyun 				"oxid x%x: %d %d\n",
3225*4882a593Smuzhiyun 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3226*4882a593Smuzhiyun 	}
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 	cmdwqe->context2 = NULL;
3229*4882a593Smuzhiyun 	cmdwqe->context3 = NULL;
3230*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, cmdwqe);
3231*4882a593Smuzhiyun 	kfree(ctxp);
3232*4882a593Smuzhiyun }
3233*4882a593Smuzhiyun 
3234*4882a593Smuzhiyun static int
3235*4882a593Smuzhiyun lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3236*4882a593Smuzhiyun 			     struct lpfc_async_xchg_ctx *ctxp,
3237*4882a593Smuzhiyun 			     uint32_t sid, uint16_t xri)
3238*4882a593Smuzhiyun {
3239*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp = NULL;
3240*4882a593Smuzhiyun 	struct lpfc_iocbq *abts_wqeq;
3241*4882a593Smuzhiyun 	union lpfc_wqe128 *wqe_abts;
3242*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3245*4882a593Smuzhiyun 			"6067 ABTS: sid %x xri x%x/x%x\n",
3246*4882a593Smuzhiyun 			sid, xri, ctxp->wqeq->sli4_xritag);
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	if (phba->nvmet_support && phba->targetport)
3249*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun 	ndlp = lpfc_findnode_did(phba->pport, sid);
3252*4882a593Smuzhiyun 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3253*4882a593Smuzhiyun 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3254*4882a593Smuzhiyun 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3255*4882a593Smuzhiyun 		if (tgtp)
3256*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_abort_rsp_error);
3257*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3258*4882a593Smuzhiyun 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
3259*4882a593Smuzhiyun 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 		/* No failure to an ABTS request. */
3262*4882a593Smuzhiyun 		return 0;
3263*4882a593Smuzhiyun 	}
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	abts_wqeq = ctxp->wqeq;
3266*4882a593Smuzhiyun 	wqe_abts = &abts_wqeq->wqe;
3267*4882a593Smuzhiyun 
3268*4882a593Smuzhiyun 	/*
3269*4882a593Smuzhiyun 	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3270*4882a593Smuzhiyun 	 * that were initialized in lpfc_sli4_nvmet_alloc.
3271*4882a593Smuzhiyun 	 */
3272*4882a593Smuzhiyun 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3273*4882a593Smuzhiyun 
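	/* The abort of an unsolicited exchange is sent as a BLS ABTS frame
	 * carried by an XMIT_SEQUENCE WQE (R_CTL = BA_ABTS, TYPE = BLS),
	 * since the driver has no active exchange of its own to abort here.
	 */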
3274*4882a593Smuzhiyun 	/* Word 5 */
3275*4882a593Smuzhiyun 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3276*4882a593Smuzhiyun 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3277*4882a593Smuzhiyun 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3278*4882a593Smuzhiyun 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3279*4882a593Smuzhiyun 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3280*4882a593Smuzhiyun 
3281*4882a593Smuzhiyun 	/* Word 6 */
3282*4882a593Smuzhiyun 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3283*4882a593Smuzhiyun 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3284*4882a593Smuzhiyun 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3285*4882a593Smuzhiyun 	       abts_wqeq->sli4_xritag);
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 	/* Word 7 */
3288*4882a593Smuzhiyun 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3289*4882a593Smuzhiyun 	       CMD_XMIT_SEQUENCE64_WQE);
3290*4882a593Smuzhiyun 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3291*4882a593Smuzhiyun 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3292*4882a593Smuzhiyun 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 	/* Word 8 */
3295*4882a593Smuzhiyun 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun 	/* Word 9 */
3298*4882a593Smuzhiyun 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3299*4882a593Smuzhiyun 	/* Needs to be set by caller */
3300*4882a593Smuzhiyun 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	/* Word 10 */
3303*4882a593Smuzhiyun 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3304*4882a593Smuzhiyun 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3305*4882a593Smuzhiyun 	       LPFC_WQE_LENLOC_WORD12);
3306*4882a593Smuzhiyun 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3307*4882a593Smuzhiyun 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun 	/* Word 11 */
3310*4882a593Smuzhiyun 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3311*4882a593Smuzhiyun 	       LPFC_WQE_CQ_ID_DEFAULT);
3312*4882a593Smuzhiyun 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3313*4882a593Smuzhiyun 	       OTHER_COMMAND);
3314*4882a593Smuzhiyun 
3315*4882a593Smuzhiyun 	abts_wqeq->vport = phba->pport;
3316*4882a593Smuzhiyun 	abts_wqeq->context1 = ndlp;
3317*4882a593Smuzhiyun 	abts_wqeq->context2 = ctxp;
3318*4882a593Smuzhiyun 	abts_wqeq->context3 = NULL;
3319*4882a593Smuzhiyun 	abts_wqeq->rsvd2 = 0;
3320*4882a593Smuzhiyun 	/* hba_wqidx should already be setup from command we are aborting */
3321*4882a593Smuzhiyun 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3322*4882a593Smuzhiyun 	abts_wqeq->iocb.ulpLe = 1;
3323*4882a593Smuzhiyun 
3324*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3325*4882a593Smuzhiyun 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
3326*4882a593Smuzhiyun 			xri, abts_wqeq->iotag);
3327*4882a593Smuzhiyun 	return 1;
3328*4882a593Smuzhiyun }
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun static int
3331*4882a593Smuzhiyun lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3332*4882a593Smuzhiyun 			       struct lpfc_async_xchg_ctx *ctxp,
3333*4882a593Smuzhiyun 			       uint32_t sid, uint16_t xri)
3334*4882a593Smuzhiyun {
3335*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3336*4882a593Smuzhiyun 	struct lpfc_iocbq *abts_wqeq;
3337*4882a593Smuzhiyun 	struct lpfc_nodelist *ndlp;
3338*4882a593Smuzhiyun 	unsigned long flags;
3339*4882a593Smuzhiyun 	u8 opt;
3340*4882a593Smuzhiyun 	int rc;
3341*4882a593Smuzhiyun 
3342*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3343*4882a593Smuzhiyun 	if (!ctxp->wqeq) {
3344*4882a593Smuzhiyun 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3345*4882a593Smuzhiyun 		ctxp->wqeq->hba_wqidx = 0;
3346*4882a593Smuzhiyun 	}
3347*4882a593Smuzhiyun 
3348*4882a593Smuzhiyun 	ndlp = lpfc_findnode_did(phba->pport, sid);
3349*4882a593Smuzhiyun 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3350*4882a593Smuzhiyun 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3351*4882a593Smuzhiyun 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3352*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3353*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3354*4882a593Smuzhiyun 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
3355*4882a593Smuzhiyun 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun 		/* No failure to an ABTS request. */
3358*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3359*4882a593Smuzhiyun 		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3360*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3361*4882a593Smuzhiyun 		return 0;
3362*4882a593Smuzhiyun 	}
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 	/* Issue ABTS for this WQE based on iotag */
3365*4882a593Smuzhiyun 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3366*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3367*4882a593Smuzhiyun 	if (!ctxp->abort_wqeq) {
3368*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3369*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3370*4882a593Smuzhiyun 				"6161 ABORT failed: No wqeqs: "
3371*4882a593Smuzhiyun 				"xri: x%x\n", ctxp->oxid);
3372*4882a593Smuzhiyun 		/* No failure to an ABTS request. */
3373*4882a593Smuzhiyun 		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3374*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3375*4882a593Smuzhiyun 		return 0;
3376*4882a593Smuzhiyun 	}
3377*4882a593Smuzhiyun 	abts_wqeq = ctxp->abort_wqeq;
3378*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_ABORT;
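	/* If an ABTS has already been received from the initiator for this
	 * exchange (LPFC_NVME_ABTS_RCV), request INHIBIT_ABORT so the
	 * adapter cleans up the exchange without sending another ABTS on
	 * the wire.
	 */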
3379*4882a593Smuzhiyun 	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
3380*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3381*4882a593Smuzhiyun 
3382*4882a593Smuzhiyun 	/* Log entry into the abort submit path. */
3383*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3384*4882a593Smuzhiyun 			"6162 ABORT Request to rport DID x%06x "
3385*4882a593Smuzhiyun 			"for xri x%x x%x\n",
3386*4882a593Smuzhiyun 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun 	/* If the hba is getting reset, this flag is set.  It is
3389*4882a593Smuzhiyun 	 * cleared when the reset is complete and rings reestablished.
3390*4882a593Smuzhiyun 	 */
3391*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, flags);
3392*4882a593Smuzhiyun 	/* driver queued commands are in process of being flushed */
3393*4882a593Smuzhiyun 	if (phba->hba_flag & HBA_IOQ_FLUSH) {
3394*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
3395*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3396*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3397*4882a593Smuzhiyun 				"6163 Driver in reset cleanup - flushing "
3398*4882a593Smuzhiyun 				"NVME Req now. hba_flag x%x oxid x%x\n",
3399*4882a593Smuzhiyun 				phba->hba_flag, ctxp->oxid);
3400*4882a593Smuzhiyun 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3401*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3402*4882a593Smuzhiyun 		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3403*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3404*4882a593Smuzhiyun 		return 0;
3405*4882a593Smuzhiyun 	}
3406*4882a593Smuzhiyun 
3407*4882a593Smuzhiyun 	/* Outstanding abort is in progress */
3408*4882a593Smuzhiyun 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3409*4882a593Smuzhiyun 		spin_unlock_irqrestore(&phba->hbalock, flags);
3410*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3411*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3412*4882a593Smuzhiyun 				"6164 Outstanding NVME I/O Abort Request "
3413*4882a593Smuzhiyun 				"still pending on oxid x%x\n",
3414*4882a593Smuzhiyun 				ctxp->oxid);
3415*4882a593Smuzhiyun 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3416*4882a593Smuzhiyun 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3417*4882a593Smuzhiyun 		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3418*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3419*4882a593Smuzhiyun 		return 0;
3420*4882a593Smuzhiyun 	}
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 	/* Ready - mark outstanding as aborted by driver. */
3423*4882a593Smuzhiyun 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3424*4882a593Smuzhiyun 
3425*4882a593Smuzhiyun 	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3426*4882a593Smuzhiyun 
3427*4882a593Smuzhiyun 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
3428*4882a593Smuzhiyun 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3429*4882a593Smuzhiyun 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3430*4882a593Smuzhiyun 	abts_wqeq->iocb_cmpl = NULL;
3431*4882a593Smuzhiyun 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3432*4882a593Smuzhiyun 	abts_wqeq->context2 = ctxp;
3433*4882a593Smuzhiyun 	abts_wqeq->vport = phba->pport;
3434*4882a593Smuzhiyun 	if (!ctxp->hdwq)
3435*4882a593Smuzhiyun 		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3438*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
3439*4882a593Smuzhiyun 	if (rc == WQE_SUCCESS) {
3440*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_sol);
3441*4882a593Smuzhiyun 		return 0;
3442*4882a593Smuzhiyun 	}
3443*4882a593Smuzhiyun 
3444*4882a593Smuzhiyun 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3445*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3446*4882a593Smuzhiyun 	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3447*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3448*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3449*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3450*4882a593Smuzhiyun 			"6166 Failed ABORT issue_wqe with status x%x "
3451*4882a593Smuzhiyun 			"for oxid x%x.\n",
3452*4882a593Smuzhiyun 			rc, ctxp->oxid);
3453*4882a593Smuzhiyun 	return 1;
3454*4882a593Smuzhiyun }
3455*4882a593Smuzhiyun 
3456*4882a593Smuzhiyun static int
3457*4882a593Smuzhiyun lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3458*4882a593Smuzhiyun 				 struct lpfc_async_xchg_ctx *ctxp,
3459*4882a593Smuzhiyun 				 uint32_t sid, uint16_t xri)
3460*4882a593Smuzhiyun {
3461*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3462*4882a593Smuzhiyun 	struct lpfc_iocbq *abts_wqeq;
3463*4882a593Smuzhiyun 	unsigned long flags;
3464*4882a593Smuzhiyun 	bool released = false;
3465*4882a593Smuzhiyun 	int rc;
3466*4882a593Smuzhiyun 
3467*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3468*4882a593Smuzhiyun 	if (!ctxp->wqeq) {
3469*4882a593Smuzhiyun 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3470*4882a593Smuzhiyun 		ctxp->wqeq->hba_wqidx = 0;
3471*4882a593Smuzhiyun 	}
3472*4882a593Smuzhiyun 
3473*4882a593Smuzhiyun 	if (ctxp->state == LPFC_NVME_STE_FREE) {
3474*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3475*4882a593Smuzhiyun 				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3476*4882a593Smuzhiyun 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3477*4882a593Smuzhiyun 		rc = WQE_BUSY;
3478*4882a593Smuzhiyun 		goto aerr;
3479*4882a593Smuzhiyun 	}
3480*4882a593Smuzhiyun 	ctxp->state = LPFC_NVME_STE_ABORT;
3481*4882a593Smuzhiyun 	ctxp->entry_cnt++;
3482*4882a593Smuzhiyun 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3483*4882a593Smuzhiyun 	if (rc == 0)
3484*4882a593Smuzhiyun 		goto aerr;
3485*4882a593Smuzhiyun 
3486*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, flags);
3487*4882a593Smuzhiyun 	abts_wqeq = ctxp->wqeq;
3488*4882a593Smuzhiyun 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3489*4882a593Smuzhiyun 	abts_wqeq->iocb_cmpl = NULL;
3490*4882a593Smuzhiyun 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3491*4882a593Smuzhiyun 	if (!ctxp->hdwq)
3492*4882a593Smuzhiyun 		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3493*4882a593Smuzhiyun 
3494*4882a593Smuzhiyun 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3495*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
3496*4882a593Smuzhiyun 	if (rc == WQE_SUCCESS) {
3497*4882a593Smuzhiyun 		return 0;
3498*4882a593Smuzhiyun 	}
3499*4882a593Smuzhiyun 
3500*4882a593Smuzhiyun aerr:
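	/* Abort could not be issued.  Clear the abort/release flags under the
	 * context lock; if the transport has already released the context
	 * (LPFC_NVME_CTX_RLS), repost the buffer here since no abort
	 * completion will run to do it.
	 */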
3501*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3502*4882a593Smuzhiyun 	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3503*4882a593Smuzhiyun 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3504*4882a593Smuzhiyun 		list_del_init(&ctxp->list);
3505*4882a593Smuzhiyun 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3506*4882a593Smuzhiyun 		released = true;
3507*4882a593Smuzhiyun 	}
3508*4882a593Smuzhiyun 	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3509*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3510*4882a593Smuzhiyun 
3511*4882a593Smuzhiyun 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3512*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3513*4882a593Smuzhiyun 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3514*4882a593Smuzhiyun 			"(%x)\n",
3515*4882a593Smuzhiyun 			ctxp->oxid, rc, released);
3516*4882a593Smuzhiyun 	if (released)
3517*4882a593Smuzhiyun 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3518*4882a593Smuzhiyun 	return 1;
3519*4882a593Smuzhiyun }
3520*4882a593Smuzhiyun 
3521*4882a593Smuzhiyun /**
3522*4882a593Smuzhiyun  * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
3523*4882a593Smuzhiyun  *        via async frame receive where the frame is not handled.
3524*4882a593Smuzhiyun  * @phba: pointer to adapter structure
3525*4882a593Smuzhiyun  * @ctxp: pointer to the asynchronously received sequence
3526*4882a593Smuzhiyun  * @sid: address of the remote port to send the ABTS to
3527*4882a593Smuzhiyun  * @xri: oxid value for the ABTS (the other side's exchange id).
3528*4882a593Smuzhiyun  **/
3529*4882a593Smuzhiyun int
3530*4882a593Smuzhiyun lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3531*4882a593Smuzhiyun 				struct lpfc_async_xchg_ctx *ctxp,
3532*4882a593Smuzhiyun 				uint32_t sid, uint16_t xri)
3533*4882a593Smuzhiyun {
3534*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp = NULL;
3535*4882a593Smuzhiyun 	struct lpfc_iocbq *abts_wqeq;
3536*4882a593Smuzhiyun 	unsigned long flags;
3537*4882a593Smuzhiyun 	int rc;
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun 	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3540*4882a593Smuzhiyun 	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3541*4882a593Smuzhiyun 		ctxp->state = LPFC_NVME_STE_LS_ABORT;
3542*4882a593Smuzhiyun 		ctxp->entry_cnt++;
3543*4882a593Smuzhiyun 	} else {
3544*4882a593Smuzhiyun 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3545*4882a593Smuzhiyun 				"6418 NVMET LS abort state mismatch "
3546*4882a593Smuzhiyun 				"IO x%x: %d %d\n",
3547*4882a593Smuzhiyun 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3548*4882a593Smuzhiyun 		ctxp->state = LPFC_NVME_STE_LS_ABORT;
3549*4882a593Smuzhiyun 	}
3550*4882a593Smuzhiyun 
3551*4882a593Smuzhiyun 	if (phba->nvmet_support && phba->targetport)
3552*4882a593Smuzhiyun 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3553*4882a593Smuzhiyun 
3554*4882a593Smuzhiyun 	if (!ctxp->wqeq) {
3555*4882a593Smuzhiyun 		/* Issue ABTS for this WQE based on iotag */
3556*4882a593Smuzhiyun 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3557*4882a593Smuzhiyun 		if (!ctxp->wqeq) {
3558*4882a593Smuzhiyun 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3559*4882a593Smuzhiyun 					"6068 Abort failed: No wqeqs: "
3560*4882a593Smuzhiyun 					"xri: x%x\n", xri);
3561*4882a593Smuzhiyun 			/* No failure to an ABTS request. */
3562*4882a593Smuzhiyun 			kfree(ctxp);
3563*4882a593Smuzhiyun 			return 0;
3564*4882a593Smuzhiyun 		}
3565*4882a593Smuzhiyun 	}
3566*4882a593Smuzhiyun 	abts_wqeq = ctxp->wqeq;
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3569*4882a593Smuzhiyun 		rc = WQE_BUSY;
3570*4882a593Smuzhiyun 		goto out;
3571*4882a593Smuzhiyun 	}
3572*4882a593Smuzhiyun 
3573*4882a593Smuzhiyun 	spin_lock_irqsave(&phba->hbalock, flags);
3574*4882a593Smuzhiyun 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3575*4882a593Smuzhiyun 	abts_wqeq->iocb_cmpl = NULL;
3576*4882a593Smuzhiyun 	abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
3577*4882a593Smuzhiyun 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3578*4882a593Smuzhiyun 	spin_unlock_irqrestore(&phba->hbalock, flags);
3579*4882a593Smuzhiyun 	if (rc == WQE_SUCCESS) {
3580*4882a593Smuzhiyun 		if (tgtp)
3581*4882a593Smuzhiyun 			atomic_inc(&tgtp->xmt_abort_unsol);
3582*4882a593Smuzhiyun 		return 0;
3583*4882a593Smuzhiyun 	}
3584*4882a593Smuzhiyun out:
3585*4882a593Smuzhiyun 	if (tgtp)
3586*4882a593Smuzhiyun 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3587*4882a593Smuzhiyun 	abts_wqeq->context2 = NULL;
3588*4882a593Smuzhiyun 	abts_wqeq->context3 = NULL;
3589*4882a593Smuzhiyun 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3590*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3591*4882a593Smuzhiyun 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
3592*4882a593Smuzhiyun 	return 1;
3593*4882a593Smuzhiyun }
3594*4882a593Smuzhiyun 
3595*4882a593Smuzhiyun /**
3596*4882a593Smuzhiyun  * lpfc_nvmet_invalidate_host - invalidate an NVME host on this target port
3597*4882a593Smuzhiyun  *
3598*4882a593Smuzhiyun  * @phba: pointer to the driver instance bound to an adapter port.
3599*4882a593Smuzhiyun  * @ndlp: pointer to an lpfc_nodelist type
3600*4882a593Smuzhiyun  *
3601*4882a593Smuzhiyun  * This routine upcalls the nvmet transport to invalidate an NVME
3602*4882a593Smuzhiyun  * host to which this target instance had active connections.
3603*4882a593Smuzhiyun  */
3604*4882a593Smuzhiyun void
3605*4882a593Smuzhiyun lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3606*4882a593Smuzhiyun {
3607*4882a593Smuzhiyun 	struct lpfc_nvmet_tgtport *tgtp;
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
3610*4882a593Smuzhiyun 			"6203 Invalidating hosthandle x%px\n",
3611*4882a593Smuzhiyun 			ndlp);
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3614*4882a593Smuzhiyun 	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3615*4882a593Smuzhiyun 
3616*4882a593Smuzhiyun #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3617*4882a593Smuzhiyun 	/* Need to get the nvmet_fc_target_port pointer here.*/
3618*4882a593Smuzhiyun 	nvmet_fc_invalidate_host(phba->targetport, ndlp);
3619*4882a593Smuzhiyun #endif
3620*4882a593Smuzhiyun }
3621