xref: /OK3568_Linux_fs/kernel/drivers/scsi/qedi/qedi.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 */

#ifndef _QEDI_H_
#define _QEDI_H_

#define __PREVENT_QED_HSI__

#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>

#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedi_dbg.h"
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedi_version.h"
#include "qedi_nvm_iscsi_cfg.h"

#define QEDI_MODULE_NAME		"qedi"

struct qedi_endpoint;

#ifndef GET_FIELD2
#define GET_FIELD2(value, name) \
	(((value) & (name ## _MASK)) >> (name ## _OFFSET))
#endif
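
/*
 * GET_FIELD2() extracts a bit field from a value using the field's
 * generated <name>_MASK and <name>_OFFSET constants. Illustrative sketch
 * only; FOO_FIELD and its mask/offset are hypothetical, not part of this
 * driver:
 *
 *	#define FOO_FIELD_MASK		0x00000F00
 *	#define FOO_FIELD_OFFSET	8
 *	u32 foo = GET_FIELD2(reg_val, FOO_FIELD);	// bits 11:8 of reg_val
 */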

/*
 * PCI function probe defines
 */
#define QEDI_MODE_NORMAL	0
#define QEDI_MODE_RECOVERY	1
#define QEDI_MODE_SHUTDOWN	2

#define ISCSI_WQE_SET_PTU_INVALIDATE	1
#define QEDI_MAX_ISCSI_TASK		4096
#define QEDI_MAX_TASK_NUM		0x0FFF
#define QEDI_MAX_ISCSI_CONNS_PER_HBA	1024
#define QEDI_ISCSI_MAX_BDS_PER_CMD	255	/* Firmware max BDs is 255 */
#define MAX_OUTSTANDING_TASKS_PER_CON	1024

#define QEDI_MAX_BD_LEN		0xffff
#define QEDI_BD_SPLIT_SZ	0x1000
#define QEDI_PAGE_SIZE		4096
#define QEDI_FAST_SGE_COUNT	4
/* MAX Length for cached SGL */
#define MAX_SGLEN_FOR_CACHESGL	((1U << 16) - 1)

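/*
 * MIN_NUM_CPUS_MSIX(): the lesser of the device's CQ count and the number
 * of online CPUs.
 */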
#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
					num_online_cpus())

#define QEDI_LOCAL_PORT_MIN     60000
#define QEDI_LOCAL_PORT_MAX     61024
#define QEDI_LOCAL_PORT_RANGE   (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
#define QEDI_LOCAL_PORT_INVALID	0xffff
#define TX_RX_RING		16
#define RX_RING			(TX_RX_RING - 1)
#define QEDI_PAGE_ALIGN(addr)	ALIGN(addr, QEDI_PAGE_SIZE)
#define QEDI_PAGE_MASK		(~((QEDI_PAGE_SIZE) - 1))

#define QEDI_HW_DMA_BOUNDARY	0xfff
#define QEDI_PATH_HANDLE	0xFE0000000UL

enum qedi_nvm_tgts {
	QEDI_NVM_TGT_PRI,
	QEDI_NVM_TGT_SEC,
};

struct qedi_nvm_iscsi_image {
	struct nvm_iscsi_cfg iscsi_cfg;
	u32 crc;
};

struct qedi_uio_ctrl {
	/* meta data */
	u32 uio_hsi_version;

	/* user writes */
	u32 host_tx_prod;
	u32 host_rx_cons;
	u32 host_rx_bd_cons;
	u32 host_tx_pkt_len;
	u32 host_rx_cons_cnt;

	/* driver writes */
	u32 hw_tx_cons;
	u32 hw_rx_prod;
	u32 hw_rx_bd_prod;
	u32 hw_rx_prod_cnt;

	/* other */
	u8 mac_addr[6];
	u8 reserve[2];
};

struct qedi_rx_bd {
	u32 rx_pkt_index;
	u32 rx_pkt_len;
	u16 vlan_id;
};

#define QEDI_RX_DESC_CNT	(QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
#define QEDI_MAX_RX_DESC_CNT	(QEDI_RX_DESC_CNT - 1)
#define QEDI_NUM_RX_BD		(QEDI_RX_DESC_CNT * 1)
#define QEDI_MAX_RX_BD		(QEDI_NUM_RX_BD - 1)

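/*
 * Advance an RX BD index: normally by one, but by two when the masked index
 * reaches QEDI_MAX_RX_DESC_CNT - 1, so the last descriptor slot is skipped.
 */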
#define QEDI_NEXT_RX_IDX(x)	((((x) & (QEDI_MAX_RX_DESC_CNT)) ==	\
				  (QEDI_MAX_RX_DESC_CNT - 1)) ?		\
				 (x) + 2 : (x) + 1)

struct qedi_uio_dev {
	struct uio_info		qedi_uinfo;
	u32			uio_dev;
	struct list_head	list;

	u32			ll2_ring_size;
	void			*ll2_ring;

	u32			ll2_buf_size;
	void			*ll2_buf;

	void			*rx_pkt;
	void			*tx_pkt;

	struct qedi_ctx		*qedi;
	struct pci_dev		*pdev;
	void			*uctrl;
};

/* List to maintain the skb pointers */
struct skb_work_list {
	struct list_head list;
	struct sk_buff *skb;
	u16 vlan_id;
};

/* Queue sizes in number of elements */
#define QEDI_SQ_SIZE		MAX_OUTSTANDING_TASKS_PER_CON
#define QEDI_CQ_SIZE		2048
#define QEDI_CMDQ_SIZE		QEDI_MAX_ISCSI_TASK
#define QEDI_PROTO_CQ_PROD_IDX	0

struct qedi_glbl_q_params {
	u64 hw_p_cq;	/* Completion queue PBL */
	u64 hw_p_rq;	/* Request queue PBL */
	u64 hw_p_cmdq;	/* Command queue PBL */
};

struct global_queue {
	union iscsi_cqe *cq;
	dma_addr_t cq_dma;
	u32 cq_mem_size;
	u32 cq_cons_idx; /* Completion queue consumer index */

	void *cq_pbl;
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;
};

struct qedi_fastpath {
	struct qed_sb_info	*sb_info;
	u16			sb_id;
#define QEDI_NAME_SIZE		16
	char			name[QEDI_NAME_SIZE];
	struct qedi_ctx		*qedi;
};

/* Used to pass fastpath information needed to process CQEs */
struct qedi_io_work {
	struct list_head list;
	struct iscsi_cqe_solicited cqe;
	u16	que_idx;
};

/**
 * struct iscsi_cid_queue - Per adapter iSCSI CID queue
 *
 * @cid_que_base:           queue base memory
 * @cid_que:                queue memory pointer
 * @cid_q_prod_idx:         producer index
 * @cid_q_cons_idx:         consumer index
 * @cid_q_max_idx:          max index, used to detect wrap-around condition
 * @cid_free_cnt:           queue size
 * @conn_cid_tbl:           iscsi cid to conn structure mapping table
 *
 * Per adapter iSCSI CID Queue
 */
struct iscsi_cid_queue {
	void *cid_que_base;
	u32 *cid_que;
	u32 cid_q_prod_idx;
	u32 cid_q_cons_idx;
	u32 cid_q_max_idx;
	u32 cid_free_cnt;
	struct qedi_conn **conn_cid_tbl;
};

struct qedi_portid_tbl {
	spinlock_t      lock;	/* Port id lock */
	u16             start;
	u16             max;
	u16             next;
	unsigned long   *table;
};

struct qedi_itt_map {
	__le32	itt;
	struct qedi_cmd *p_cmd;
};

/* I/O tracing entry */
#define QEDI_IO_TRACE_SIZE             2048
struct qedi_io_log {
#define QEDI_IO_TRACE_REQ              0
#define QEDI_IO_TRACE_RSP              1
	u8 direction;
	u16 task_id;
	u32 cid;
	u32 port_id;	/* Remote port fabric ID */
	int lun;
	u8 op;		/* SCSI CDB */
	u8 lba[4];
	unsigned int bufflen;	/* SCSI buffer length */
	unsigned int sg_count;	/* Number of SG elements */
	u8 fast_sgs;		/* number of fast sgls */
	u8 slow_sgs;		/* number of slow sgls */
	u8 cached_sgs;		/* number of cached sgls */
	int result;		/* Result passed back to mid-layer */
	unsigned long jiffies;	/* Time stamp when I/O logged */
	int refcount;		/* Reference count for task id */
	unsigned int blk_req_cpu; /* CPU that the task is queued on by
				   * blk layer
				   */
	unsigned int req_cpu;	/* CPU that the task is queued on */
	unsigned int intr_cpu;	/* Interrupt CPU that the task is received on */
	unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
				  * returned to blk layer
				  */
	bool cached_sge;
	bool slow_sge;
	bool fast_sge;
};

/* Number of entries in BDQ */
#define QEDI_BDQ_NUM		256
#define QEDI_BDQ_BUF_SIZE	256

/* DMA coherent buffers for BDQ */
struct qedi_bdq_buf {
	void *buf_addr;
	dma_addr_t buf_dma;
};

/* Main port level struct */
struct qedi_ctx {
	struct qedi_dbg_ctx dbg_ctx;
	struct Scsi_Host *shost;
	struct pci_dev *pdev;
	struct qed_dev *cdev;
	struct qed_dev_iscsi_info dev_info;
	struct qed_int_info int_info;
	struct qedi_glbl_q_params *p_cpuq;
	struct global_queue **global_queues;
	/* uio declaration */
	struct qedi_uio_dev *udev;
	struct list_head ll2_skb_list;
	spinlock_t ll2_lock;	/* Light L2 lock */
	spinlock_t hba_lock;	/* per port lock */
	struct task_struct *ll2_recv_thread;
	unsigned long qedi_err_flags;
#define QEDI_ERR_ATTN_CLR_EN	0
#define QEDI_ERR_IS_RECOVERABLE	2
#define QEDI_ERR_OVERRIDE_EN	31
	unsigned long flags;
#define UIO_DEV_OPENED		1
#define QEDI_IOTHREAD_WAKE	2
#define QEDI_IN_RECOVERY	5
#define QEDI_IN_OFFLINE		6
#define QEDI_IN_SHUTDOWN	7
#define QEDI_BLOCK_IO		8

	u8 mac[ETH_ALEN];
	u32 src_ip[4];
	u8 ip_type;

	/* Physical address of above array */
	dma_addr_t hw_p_cpuq;

	struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	struct qedi_nvm_iscsi_image *iscsi_image;
	dma_addr_t nvm_buf_dma;
	void __iomem *bdq_primary_prod;
	void __iomem *bdq_secondary_prod;
	u16 bdq_prod_idx;
	u16 rq_num_entries;

	u32 max_sqes;
	u8 num_queues;
	u32 max_active_conns;
	s32 msix_count;

	struct iscsi_cid_queue cid_que;
	struct qedi_endpoint **ep_tbl;
	struct qedi_portid_tbl lcl_port_tbl;

	/* Rx fast path intr context */
	struct qed_sb_info	*sb_array;
	struct qedi_fastpath	*fp_array;
	struct qed_iscsi_tid	tasks;

#define QEDI_LINK_DOWN		0
#define QEDI_LINK_UP		1
	atomic_t link_state;

#define QEDI_RESERVE_TASK_ID	0
#define MAX_ISCSI_TASK_ENTRIES	4096
#define QEDI_INVALID_TASK_ID	(MAX_ISCSI_TASK_ENTRIES + 1)
	unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
	struct qedi_itt_map *itt_map;
	u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
	struct qed_pf_params pf_params;

	struct workqueue_struct *tmf_thread;
	struct workqueue_struct *offload_thread;

	u16 ll2_mtu;

	struct workqueue_struct *dpc_wq;
	struct delayed_work recovery_work;
	struct delayed_work board_disable_work;

	spinlock_t task_idx_lock;	/* To protect gbl context */
	s32 last_tidx_alloc;
	s32 last_tidx_clear;

	struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;	/* protect trace log buf */
	u16 io_trace_idx;
	unsigned int intr_cpu;
	u32 cached_sgls;
	bool use_cached_sge;
	u32 slow_sgls;
	bool use_slow_sge;
	u32 fast_sgls;
	bool use_fast_sge;

	atomic_t num_offloads;
#define SYSFS_FLAG_FW_SEL_BOOT 2
#define IPV6_LEN	41
#define IPV4_LEN	17
	struct iscsi_boot_kset *boot_kset;

	/* Used for iscsi statistics */
	struct mutex stats_lock;
};

struct qedi_work {
	struct list_head list;
	struct qedi_ctx *qedi;
	union iscsi_cqe cqe;
	u16     que_idx;
	bool is_solicited;
};

struct qedi_percpu_s {
	struct task_struct *iothread;
	struct list_head work_list;
	spinlock_t p_work_lock;		/* Per cpu worker lock */
};

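/*
 * Task contexts live in fixed-size entries packed into blocks of
 * num_tids_per_block entries each; return the virtual address of the
 * context for task id @tid.
 */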
static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
{
	return (info->blocks[tid / info->num_tids_per_block] +
		(tid % info->num_tids_per_block) * info->size);
}

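/*
 * Split a 64-bit value into its upper and lower 32-bit halves, e.g. when a
 * DMA address has to be written as two 32-bit words. Illustrative sketch,
 * not a call site from this driver:
 *
 *	u32 hi = QEDI_U64_HI(some_dma_addr);
 *	u32 lo = QEDI_U64_LO(some_dma_addr);
 */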
#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))

#endif /* _QEDI_H_ */