/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
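/*
 * Sizing note (a sketch assuming the usual 4 KiB scatter/gather entry
 * size, which this header does not spell out): 64 entries x 4 KiB
 * covers a 256 KiB I/O, and the "+ 1" leaves one extra entry of
 * headroom beyond the 64 data segments.
 */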

#define LPFC_NVME_ERSP_LEN		0x20

#define LPFC_NVME_WAIT_TMO              10
#define LPFC_NVME_EXPEDITE_XRICNT	8
#define LPFC_NVME_FB_SHIFT		9
#define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */

#define LPFC_MAX_NVME_INFO_TMP_LEN	100
#define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"

#define lpfc_ndlp_get_nrport(ndlp)					\
	((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG))	\
	? NULL : ndlp->nrport)
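/*
 * Illustrative use of lpfc_ndlp_get_nrport() (a sketch, not a contract
 * defined by this header): the caller must already hold a valid ndlp
 * reference, and the returned pointer is only stable while the lock
 * protecting ndlp->nrport is held, e.g.
 *
 *	struct lpfc_nvme_rport *nrport;
 *
 *	spin_lock_irq(shost->host_lock);
 *	nrport = lpfc_ndlp_get_nrport(ndlp);
 *	if (nrport)
 *		remoteport = nrport->remoteport;
 *	spin_unlock_irq(shost->host_lock);
 */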

struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
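/*
 * Intended use (a sketch, not mandated by this header): the driver's
 * nvme-fc create_queue callback typically allocates one qhandle per
 * hardware queue and hands it back to the transport as the opaque
 * queue handle; later FCP requests carry it back so "index" can be
 * used to select the WQ.
 */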

/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;
	struct completion *lport_unreg_cmp;
	/* Add stats counters here */
	atomic_t fc4NvmeLsRequests;
	atomic_t fc4NvmeLsCmpls;
	atomic_t xmt_fcp_noxri;
	atomic_t xmt_fcp_bad_ndlp;
	atomic_t xmt_fcp_qdepth;
	atomic_t xmt_fcp_wqerr;
	atomic_t xmt_fcp_err;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_err;
	atomic_t cmpl_fcp_xb;
	atomic_t cmpl_fcp_err;
	atomic_t cmpl_ls_xb;
	atomic_t cmpl_ls_err;
};

struct lpfc_nvme_rport {
	struct lpfc_nvme_lport *lport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nodelist *ndlp;
	struct completion rport_unreg_done;
};

struct lpfc_nvme_fcpreq_priv {
	struct lpfc_io_buf *nvme_buf;
};

/*
 * Set the NVME LS request timeout to 30s. This is longer than the
 * 2 * R_A_TOV required by the spec, a value that appears to cause
 * issues with some devices.
 */
#define LPFC_NVME_LS_TIMEOUT		30
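/*
 * For context (an assumption based on common FC defaults, not stated
 * in this header): R_A_TOV usually defaults to 10 seconds, so
 * 2 * R_A_TOV works out to 20 seconds; the 30 second value above adds
 * margin on top of that spec-derived minimum.
 */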


#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_MIN_POST		128
#define LPFC_NVMET_RQE_DEF_POST		512
#define LPFC_NVMET_RQE_DEF_COUNT	2048
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)

/* Used for NVME Target */
#define LPFC_NVMET_INV_HOST_ACTIVE      1

struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
	struct completion *tport_unreg_cmp;
	atomic_t state;		/* tracks nvmet hosthandle invalidation */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};

struct lpfc_nvmet_ctx_info {
	struct list_head nvmet_ctx_list;
	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t	nvmet_ctx_list_cnt;
	char pad[16];  /* pad to a cache-line */
};

/* This retrieves the context info associated with the specified cpu / mrq */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
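/*
 * Illustrative use of lpfc_get_ctx_list() (a sketch only): look up the
 * per-CPU list for one MRQ and take its lock before touching the list,
 * e.g.
 *
 *	struct lpfc_nvmet_ctx_info *infop;
 *
 *	infop = lpfc_get_ctx_list(phba, cpu, mrq_idx);
 *	spin_lock(&infop->nvmet_ctx_list_lock);
 *	... walk infop->nvmet_ctx_list / read nvmet_ctx_list_cnt ...
 *	spin_unlock(&infop->nvmet_ctx_list_lock);
 *
 * where cpu and mrq_idx are hypothetical indices chosen by the caller.
 */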

/* Values for state field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_STE_LS_RCV		1
#define LPFC_NVME_STE_LS_ABORT		2
#define LPFC_NVME_STE_LS_RSP		3
#define LPFC_NVME_STE_RCV		4
#define LPFC_NVME_STE_DATA		5
#define LPFC_NVME_STE_ABORT		6
#define LPFC_NVME_STE_DONE		7
#define LPFC_NVME_STE_FREE		0xff

/* Values for flag field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_IO_INP		0x1  /* IO is in progress on exchange */
#define LPFC_NVME_ABORT_OP		0x2  /* Abort WQE issued on exchange */
#define LPFC_NVME_XBUSY			0x4  /* XB bit set on IO cmpl */
#define LPFC_NVME_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVME_ABTS_RCV		0x10  /* ABTS received on exchange */
#define LPFC_NVME_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
#define LPFC_NVME_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
#define LPFC_NVME_TNOTIFY		0x80  /* notify transport of abts */
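/*
 * The flag values above combine as a bitmask; a sketch of the expected
 * access pattern (the struct below notes that ctxlock protects the
 * flag field; ctxp and iflag are hypothetical caller names):
 *
 *	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *	ctxp->flag |= LPFC_NVME_ABORT_OP;
 *	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 */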

struct lpfc_async_xchg_ctx {
	union {
		struct nvmefc_tgt_fcp_req fcp_req;
	} hdlrctx;
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct nvmefc_ls_req *ls_req;
	struct nvmefc_ls_rsp ls_rsp;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	spinlock_t ctxlock; /* protect flag access */
	uint32_t sid;
	uint32_t offset;
	uint16_t oxid;
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;
	uint16_t state;
	uint16_t flag;
	void *payload;
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};


/* routines found in lpfc_nvme.c */
int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct nvmefc_ls_req *pnvme_lsreq,
		void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
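/*
 * Illustrative call pattern for __lpfc_nvme_ls_req() (a sketch, not the
 * only valid usage): callers supply their own WQE completion handler,
 * e.g.
 *
 *	ret = __lpfc_nvme_ls_req(vport, ndlp, pnvme_lsreq,
 *				 my_ls_req_cmp);
 *
 * where my_ls_req_cmp is a hypothetical handler matching the
 * (phba, cmdwqe, wcqe) signature of gen_req_cmp above.
 */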
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);

/* routines found in lpfc_nvmet.c */
int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
			uint16_t xri);
int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
			struct nvmefc_ls_rsp *ls_rsp,
			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);