xref: /OK3568_Linux_fs/kernel/drivers/scsi/lpfc/lpfc_sli.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*******************************************************************
2*4882a593Smuzhiyun  * This file is part of the Emulex Linux Device Driver for         *
3*4882a593Smuzhiyun  * Fibre Channel Host Bus Adapters.                                *
4*4882a593Smuzhiyun  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5*4882a593Smuzhiyun  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6*4882a593Smuzhiyun  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7*4882a593Smuzhiyun  * EMULEX and SLI are trademarks of Emulex.                        *
8*4882a593Smuzhiyun  * www.broadcom.com                                                *
9*4882a593Smuzhiyun  *                                                                 *
10*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or   *
11*4882a593Smuzhiyun  * modify it under the terms of version 2 of the GNU General       *
12*4882a593Smuzhiyun  * Public License as published by the Free Software Foundation.    *
13*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful. *
14*4882a593Smuzhiyun  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15*4882a593Smuzhiyun  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17*4882a593Smuzhiyun  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18*4882a593Smuzhiyun  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19*4882a593Smuzhiyun  * more details, a copy of which can be found in the file COPYING  *
20*4882a593Smuzhiyun  * included with this package.                                     *
21*4882a593Smuzhiyun  *******************************************************************/
22*4882a593Smuzhiyun 
/* Mirror generic debugfs support into the driver-local config symbol so
 * lpfc debugfs code is compiled whenever CONFIG_DEBUG_FS is enabled.
 */
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
#endif

/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
struct lpfc_vport;

/* Define the context types that SLI handles for abort and sums. */
typedef enum _lpfc_ctx_cmd {
	LPFC_CTX_LUN,	/* scope: a single LUN */
	LPFC_CTX_TGT,	/* scope: a target */
	LPFC_CTX_HOST	/* scope: the whole host/adapter */
} lpfc_ctx_cmd;
37*4882a593Smuzhiyun 
/* Generic carrier for a completion-queue event that is queued for
 * deferred processing.  @cqe holds the raw (M/A/R/W)CQE payload; which
 * union member is valid depends on the event type that produced it.
 */
struct lpfc_cq_event {
	struct list_head list;	/* linkage on the pending-event list */
	uint16_t hdwq;		/* hdw queue index the event belongs to */
	union {
		struct lpfc_mcqe		mcqe_cmpl;	/* mailbox CQE */
		struct lpfc_acqe_link		acqe_link;	/* async: link */
		struct lpfc_acqe_fip		acqe_fip;	/* async: FIP/FCoE */
		struct lpfc_acqe_dcbx		acqe_dcbx;	/* async: DCBX */
		struct lpfc_acqe_grp5		acqe_grp5;	/* async: group 5 */
		struct lpfc_acqe_fc_la		acqe_fc;	/* async: FC link attn */
		struct lpfc_acqe_sli		acqe_sli;	/* async: SLI port */
		struct lpfc_rcqe		rcqe_cmpl;	/* receive queue CQE */
		struct sli4_wcqe_xri_aborted	wcqe_axri;	/* XRI aborted CQE */
		struct lpfc_wcqe_complete	wcqe_cmpl;	/* work queue cmpl CQE */
	} cqe;
};
54*4882a593Smuzhiyun 
/* This structure is used to handle IOCB requests / responses.
 * It is the per-IO context for both SLI-3 (IOCB_t) and SLI-4
 * (union lpfc_wqe128) command formats.
 */
struct lpfc_iocbq {
	/* lpfc_iocbqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	struct list_head dlist;
	uint16_t iotag;         /* pre-assigned IO tag */
	uint16_t sli4_lxritag;  /* logical pre-assigned XRI. */
	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
	uint16_t hba_wqidx;     /* index to HBA work queue */
	struct lpfc_cq_event cq_event;
	struct lpfc_wcqe_complete wcqe_cmpl;	/* WQE cmpl */
	uint64_t isr_timestamp;	/* captured in the ISR; presumably for
				 * latency accounting — confirm units
				 */

	/* Command payload in the format matching the active SLI revision */
	union lpfc_wqe128 wqe;	/* SLI-4 */
	IOCB_t iocb;		/* SLI-3 */

	uint8_t rsvd2;
	uint8_t priority;	/* OAS priority */
	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
	uint32_t iocb_flag;	/* bitwise OR of the LPFC_IO_* flags below */
#define LPFC_IO_LIBDFC		1	/* libdfc iocb */
#define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out
					      * (deliberately shares the
					      * LPFC_IO_WAKE bit)
					      */
#define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
#define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP		0x100	/* security IO */
#define LPFC_IO_ON_TXCMPLQ	0x200	/* The IO is still on the TXCMPLQ */
#define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */

#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT	14

#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF		0x20000 /* FOF FCP IO */
#define LPFC_IO_LOOPBACK	0x40000 /* Loopback IO */
#define LPFC_PRLI_NVME_REQ	0x80000 /* This is an NVME PRLI. */
#define LPFC_PRLI_FCP_REQ	0x100000 /* This is an FCP PRLI. */
#define LPFC_IO_NVME	        0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS		0x400000 /* NVME LS command */
#define LPFC_IO_NVMET		0x800000 /* NVMET command */

	uint32_t drvrTimeout;	/* driver timeout in seconds */
	struct lpfc_vport *vport;/* virtual port pointer */
	void *context1;		/* caller context information */
	void *context2;		/* caller context information */
	void *context3;		/* caller context information */
	union {
		wait_queue_head_t    *wait_queue;	/* waiter for sync IO */
		struct lpfc_iocbq    *rsp_iocb;		/* associated response */
		struct lpfcMboxq     *mbox;		/* associated mailbox cmd */
		struct lpfc_nodelist *ndlp;		/* associated remote node */
		struct lpfc_node_rrq *rrq;		/* associated RRQ */
	} context_un;

	/* Completion callbacks; which one fires depends on how the IO was
	 * issued (fabric scheduler, synchronous wait, plain IOCB, or
	 * SLI-4 WQE path).
	 */
	void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *);
	void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *);
	void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *);
	void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_wcqe_complete *);
};
126*4882a593Smuzhiyun 
#define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */

/* Return codes for the SLI-3 IOCB submit path */
#define IOCB_SUCCESS        0
#define IOCB_BUSY           1
#define IOCB_ERROR          2
#define IOCB_TIMEDOUT       3

#define SLI_WQE_RET_WQE    1    /* Return WQE if cmd ring full */

/* Return codes for the SLI-4 WQE submit path */
#define WQE_SUCCESS        0
#define WQE_BUSY           1
#define WQE_ERROR          2
#define WQE_TIMEDOUT       3
#define WQE_ABORTED        4

/* NOTE(review): presumably values for lpfcMboxq.mbox_flag — verify */
#define LPFC_MBX_WAKE		1
#define LPFC_MBX_IMED_UNREG	2

/* Driver context for one mailbox command, covering both the SLI-3
 * (MAILBOX_t) and SLI-4 (lpfc_mqe) mailbox formats.
 */
typedef struct lpfcMboxq {
	/* MBOXQs are used in single linked lists */
	struct list_head list;	/* ptr to next mailbox command */
	union {
		MAILBOX_t mb;		/* Mailbox cmd */
		struct lpfc_mqe mqe;	/* SLI-4 mailbox queue entry */
	} u;
	struct lpfc_vport *vport; /* virtual port pointer */
	void *ctx_ndlp;		  /* caller ndlp information */
	void *ctx_buf;		  /* caller buffer information */
	void *context3;		  /* additional caller context */

	/* called when the mailbox command completes */
	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
	uint8_t mbox_flag;
	uint16_t in_ext_byte_len;	/* length of extension input bytes */
	uint16_t out_ext_byte_len;	/* length of extension output bytes */
	uint8_t  mbox_offset_word;	/* offset word for extension data */
	struct lpfc_mcqe mcqe;		/* completion CQE for this mbox */
	struct lpfc_mbx_nembed_sge_virt *sge_array; /* non-embedded SGEs */
} LPFC_MBOXQ_t;
165*4882a593Smuzhiyun 
/* Mailbox issue modes */
#define MBX_POLL        1	/* poll mailbox till command done, then
				   return */
#define MBX_NOWAIT      2	/* issue command then return immediately */

#define LPFC_MAX_RING_MASK  5	/* max num of rctl/type masks allowed per
				   ring */
#define LPFC_SLI3_MAX_RING  4	/* Max num of SLI3 rings used by driver.
				   For SLI4, an additional ring for each
				   FCP WQ will be allocated.  */

struct lpfc_sli_ring;

/* Binds an rctl/type pair (or profile) configured on a ring to the
 * handler invoked for unsolicited events received on that ring.
 */
struct lpfc_sli_ring_mask {
	uint8_t profile;	/* profile associated with ring */
	uint8_t rctl;	/* rctl / type pair configured for ring */
	uint8_t type;	/* rctl / type pair configured for ring */
	uint8_t rsvd;
	/* rcv'd unsol event */
	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
					 struct lpfc_sli_ring *,
					 struct lpfc_iocbq *);
};
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 
/* Structure used to hold SLI statistical counters and info (per ring) */
struct lpfc_sli_ring_stat {
	uint64_t iocb_event;	 /* IOCB event counters */
	uint64_t iocb_cmd;	 /* IOCB cmd issued */
	uint64_t iocb_rsp;	 /* IOCB rsp received */
	uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
	uint64_t iocb_cmd_full;	 /* IOCB cmd ring full */
	uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
	uint64_t iocb_rsp_full;	 /* IOCB rsp ring full */
};
200*4882a593Smuzhiyun 
/* SLI-3 specific per-ring state: ring indices, sizing, and the virtual
 * addresses of the command/response IOCB rings.
 */
struct lpfc_sli3_ring {
	uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
	uint32_t next_cmdidx;   /* next_cmd index */
	uint32_t rspidx;	/* current index in response ring */
	uint32_t cmdidx;	/* current index in command ring */
	uint16_t numCiocb;	/* number of command iocb's per ring */
	uint16_t numRiocb;	/* number of rsp iocb's per ring */
	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
	uint32_t *rspringaddr;	/* virtual address for rsp rings */
};
213*4882a593Smuzhiyun 
/* SLI-4 specific per-ring state: the ring maps onto a work queue */
struct lpfc_sli4_ring {
	struct lpfc_queue *wqp;	/* Pointer to associated WQ */
};
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 
/* Structure used to hold SLI ring information.
 * Common ring state plus a union of SLI-3/SLI-4 specific parts.
 */
struct lpfc_sli_ring {
	uint16_t flag;		/* ring flags */
#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
#define LPFC_STOP_IOCB_EVENT     0x020	/* Stop processing IOCB cmds event */
	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */

	uint8_t rsvd;
	uint8_t ringno;		/* ring number */

	spinlock_t ring_lock;	/* lock for issuing commands */

	uint32_t fast_iotag;	/* max fastlookup based iotag           */
	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
	uint32_t iotag_max;	/* max iotag value to use               */
	struct list_head txq;		/* pending transmit queue */
	uint16_t txq_cnt;	/* current length of queue */
	uint16_t txq_max;	/* max length */
	struct list_head txcmplq;	/* IOs issued, awaiting completion */
	uint16_t txcmplq_cnt;	/* current length of queue */
	uint16_t txcmplq_max;	/* max length */
	uint32_t missbufcnt;	/* keep track of buffers to post */
	struct list_head postbufq;	/* posted buffer queue */
	uint16_t postbufq_cnt;	/* current length of queue */
	uint16_t postbufq_max;	/* max length */
	struct list_head iocb_continueq;	/* multi-IOCB sequence in progress */
	uint16_t iocb_continueq_cnt;	/* current length of queue */
	uint16_t iocb_continueq_max;	/* max length */
	struct list_head iocb_continue_saveq;	/* saved continuation queue */

	/* rctl/type dispatch table for unsolicited events on this ring */
	struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
	uint32_t num_mask;	/* number of mask entries in prt array */
	void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
		struct lpfc_sli_ring *, struct lpfc_iocbq *);

	struct lpfc_sli_ring_stat stats;	/* SLI statistical info */

	/* cmd ring available */
	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
					struct lpfc_sli_ring *);
	union {
		struct lpfc_sli3_ring sli3;
		struct lpfc_sli4_ring sli4;
	} sli;
};
265*4882a593Smuzhiyun 
/* Structure used for configuring an HBQ (host buffer queue) to a specific
 * profile or rctl / type, and for tracking its buffer allocations.
 */
struct lpfc_hbq_init {
	uint32_t rn;		/* Receive buffer notification */
	uint32_t entry_count;	/* max # of entries in HBQ */
	uint32_t headerLen;	/* 0 if not profile 4 or 5 */
	uint32_t logEntry;	/* Set to 1 if this HBQ used for LogEntry */
	uint32_t profile;	/* Selection profile 0=all, 7=logentry */
	uint32_t ring_mask;	/* Binds HBQ to a ring e.g. Ring0=b0001,
				 * ring2=b0100 */
	uint32_t hbq_index;	/* index of this hbq in ring .HBQs[] */

	/* Profile match fields — meanings depend on the selected profile */
	uint32_t seqlenoff;
	uint32_t maxlen;
	uint32_t seqlenbcnt;
	uint32_t cmdcodeoff;
	uint32_t cmdmatch[8];
	uint32_t mask_count;	/* number of mask entries in prt array */
	struct hbq_mask hbqMasks[6];

	/* Non-config rings fields to keep track of buffer allocations */
	uint32_t buffer_count;	/* number of buffers allocated */
	uint32_t init_count;	/* number to allocate when initialized */
	uint32_t add_count;	/* number to allocate when starved */
} ;
290*4882a593Smuzhiyun 
/* Structure used to hold SLI statistical counters and info (adapter-wide) */
struct lpfc_sli_stat {
	uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
	uint64_t mbox_cmd;       /* Mailbox commands issued */
	uint64_t sli_intr;       /* Count of Host Attention interrupts */
	uint64_t sli_prev_intr;  /* Previous cnt of Host Attention interrupts */
	uint64_t sli_ips;        /* Host Attention interrupts per sec */
	uint32_t err_attn_event; /* Error Attn event counters */
	uint32_t link_event;     /* Link event counters */
	uint32_t mbox_event;     /* Mailbox event counters */
	uint32_t mbox_busy;	 /* Mailbox cmd busy */
};
303*4882a593Smuzhiyun 
/* Structure to store link status values when port stats are reset,
 * so later readings can be reported relative to these offsets.
 */
struct lpfc_lnk_stat {
	uint32_t link_failure_count;
	uint32_t loss_of_sync_count;
	uint32_t loss_of_signal_count;
	uint32_t prim_seq_protocol_err_count;
	uint32_t invalid_tx_word_count;
	uint32_t invalid_crc_count;
	uint32_t error_frames;
	uint32_t link_events;
};
315*4882a593Smuzhiyun 
/* Structure used to hold SLI information (top-level SLI state per HBA) */
struct lpfc_sli {
	uint32_t num_rings;
	uint32_t sli_flag;

	/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
#define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT          0x1000 /* needed for Menlo fw download */
#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP     0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR         0x8000 /* EQ Delay Register is supported */
#define LPFC_QUEUE_FREE_INIT	  0x10000 /* Queue freeing is in progress */
#define LPFC_QUEUE_FREE_WAIT	  0x20000 /* Hold Queue free as it is being
					   * used outside worker thread
					   */

	struct lpfc_sli_ring *sli3_ring;	/* SLI-3 ring array */

	struct lpfc_sli_stat slistat;	/* SLI statistical info */
	struct list_head mboxq;		/* pending mailbox command queue */
	uint16_t mboxq_cnt;	/* current length of queue */
	uint16_t mboxq_max;	/* max length */
	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */
	struct list_head mboxq_cmpl;	/* completed mailbox commands */

	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
					   cmd */

	/* iotag lookup array grows in chunks of this many entries */
#define LPFC_IOCBQ_LOOKUP_INCREMENT  1024
	struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
	size_t iocbq_lookup_len;           /* current length of the array */
	uint16_t  last_iotag;              /* last allocated IOTAG */
	time64_t  stats_start;		   /* in seconds */
	struct lpfc_lnk_stat lnk_stat_offsets;	/* link stats at last reset */
};
354*4882a593Smuzhiyun 
/* Timeout for normal outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO				30
/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_TMO		60
/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
/* Timeout for other flash-based outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO_FLASH_CMD			300

/* Per-IO buffer context shared by the SCSI (FCP) and NVME paths; the
 * trailing anonymous union carries the protocol-specific fields.
 */
struct lpfc_io_buf {
	/* Common fields */
	struct list_head list;
	void *data;		/* kernel virtual address of the DMA buffer */

	dma_addr_t dma_handle;		/* bus address of @data */
	dma_addr_t dma_phys_sgl;	/* bus address of the SGL */

	struct sli4_sge *dma_sgl; /* initial segment chunk */

	/* linked list of extra sli4_hybrid_sge */
	struct list_head dma_sgl_xtra_list;

	/* list head for fcp_cmd_rsp buf */
	struct list_head dma_cmd_rsp_list;

	struct lpfc_iocbq cur_iocbq;	/* IOCB/WQE context for this IO */
	struct lpfc_sli4_hdw_queue *hdwq;	/* owning hardware queue */
	uint16_t hdwq_no;	/* index of @hdwq */
	uint16_t cpu;		/* cpu associated with this IO */

	struct lpfc_nodelist *ndlp;	/* remote node for this IO */
	uint32_t timeout;
	uint16_t flags;		/* bitwise OR of the LPFC_SBUF_* flags */
#define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
					/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF	0x4	/* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF	0x8	/* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED    0x10    /* SGL failed post to FW. */
	uint16_t status;	/* From IOCB Word 7- ulpStatus */
	uint32_t result;	/* From IOCB Word 4. */

	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
				 * dma_map_sg.  The driver needs this for calls
				 * to dma_unmap_sg.
				 */
	unsigned long start_time;	/* when the IO was started (jiffies?
					 * TODO confirm units)
					 */
	spinlock_t buf_lock;	/* lock used in case of simultaneous abort */
	bool expedite;		/* this is an expedite io_buf */

	union {
		/* SCSI specific fields */
		struct {
			struct scsi_cmnd *pCmd;		/* midlayer command */
			struct lpfc_rport_data *rdata;	/* remote port data */
			uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for
						 * protection data
						 */

			/*
			 * data and dma_handle are the kernel virtual and bus
			 * address of the dma-able buffer containing the
			 * fcp_cmd, fcp_rsp and a scatter gather bde list that
			 * supports the sg_tablesize value.
			 */
			struct fcp_cmnd *fcp_cmnd;
			struct fcp_rsp *fcp_rsp;

			wait_queue_head_t *waitq;	/* abort waiter */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			/* Used to restore any changes to protection data for
			 * error injection
			 */
			void *prot_data_segment;
			uint32_t prot_data;
			uint32_t prot_data_type;
#define	LPFC_INJERR_REFTAG	1
#define	LPFC_INJERR_APPTAG	2
#define	LPFC_INJERR_GUARD	3
#endif
		};

		/* NVME specific fields */
		struct {
			struct nvmefc_fcp_req *nvmeCmd;	/* NVME transport cmd */
			uint16_t qidx;			/* NVME queue index */
		};
	};
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Per-IO timestamps for debugfs latency statistics */
	uint64_t ts_cmd_start;
	uint64_t ts_last_cmd;
	uint64_t ts_cmd_wqput;
	uint64_t ts_isr_cmpl;
	uint64_t ts_data_io;
#endif
};
452