xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/ice/ice_controlq.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_CONTROLQ_H_
#define _ICE_CONTROLQ_H_

#include "ice_adminq_cmd.h"

/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
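/* Both queue types currently use the same 4 KB limit; the rq_buf_size and
 * sq_buf_size fields of struct ice_ctl_q_info below are normally sized from
 * these defines when the queues are configured.
 */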

#define ICE_CTL_Q_DESC(R, i) \
	(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))

#define ICE_CTL_Q_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)
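/* ICE_CTL_Q_DESC() returns a pointer to descriptor i in the ring's DMA
 * descriptor memory. ICE_CTL_Q_DESC_UNUSED() is the number of free slots;
 * for example (illustrative values), with count = 64, next_to_clean = 5 and
 * next_to_use = 10 it evaluates to 64 + 5 - 10 - 1 = 58. One slot is always
 * left unused so a full ring can be distinguished from an empty one.
 */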

/* Defines that help manage the driver vs FW API checks.
 * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
 */
#define EXP_FW_API_VER_BRANCH		0x00
#define EXP_FW_API_VER_MAJOR		0x01
#define EXP_FW_API_VER_MINOR		0x05
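/* These expected values are checked against the API version the firmware
 * reports when the admin queue is brought up; ice_aq_ver_check() in
 * ice_controlq.c decides whether the reported combination is supported.
 */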

/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
	ICE_CTL_Q_UNKNOWN = 0,
	ICE_CTL_Q_ADMIN,
	ICE_CTL_Q_MAILBOX,
};

/* Control Queue timeout settings - max delay 1s */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT	10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC		100   /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT	10    /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC	100   /* Check every 100msec */
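/* That is, a send queue command is polled every 100 usec for up to 10000
 * iterations (10000 * 100 usec = 1 s, the "max delay 1s" above), and admin
 * queue init status is polled every 100 msec for up to 10 iterations (1 s).
 */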

struct ice_ctl_q_ring {
	void *dma_head;			/* Virtual address to DMA head */
	struct ice_dma_mem desc_buf;	/* descriptor ring memory */
	void *cmd_buf;			/* command buffer memory */

	union {
		struct ice_dma_mem *sq_bi;
		struct ice_dma_mem *rq_bi;
	} r;

	u16 count;		/* Number of descriptors */

	/* used for interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	/* used for queue tracking */
	u32 head;
	u32 tail;
	u32 len;
	u32 bah;
	u32 bal;
	u32 len_mask;
	u32 len_ena_mask;
	u32 len_crit_mask;
	u32 head_mask;
};
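/* The "queue tracking" members above hold the MMIO register offsets
 * (head/tail/len/base-address high and low) and the associated bit masks for
 * this queue; they are filled in by the queue-type-specific init helpers in
 * ice_controlq.c (for example ice_adminq_init_regs() for the admin queue).
 */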

/* sq transaction details */
struct ice_sq_cd {
	struct ice_aq_desc *wb_desc;
};

#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
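/* cmd_buf holds one struct ice_sq_cd per send queue descriptor;
 * ICE_CTL_Q_DETAILS() indexes that array for descriptor i.
 */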

/* rq event information */
struct ice_rq_event_info {
	struct ice_aq_desc desc;
	u16 msg_len;
	u16 buf_len;
	u8 *msg_buf;
};
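/* buf_len describes the caller-provided msg_buf; msg_len is filled in with
 * the length of the message actually received (see ice_clean_rq_elem() in
 * ice_controlq.c, which consumes events from the receive queue).
 */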

/* Control Queue information */
struct ice_ctl_q_info {
	enum ice_ctl_q qtype;
	enum ice_aq_err rq_last_status;	/* last status on receive queue */
	struct ice_ctl_q_ring rq;	/* receive queue */
	struct ice_ctl_q_ring sq;	/* send queue */
	u32 sq_cmd_timeout;		/* send queue cmd write back timeout */
	u16 num_rq_entries;		/* receive queue depth */
	u16 num_sq_entries;		/* send queue depth */
	u16 rq_buf_size;		/* receive queue buffer size */
	u16 sq_buf_size;		/* send queue buffer size */
	enum ice_aq_err sq_last_status;	/* last status on send queue */
	struct mutex sq_lock;		/* Send queue lock */
	struct mutex rq_lock;		/* Receive queue lock */
};
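/* The driver keeps one instance of this structure per control queue, e.g.
 * hw->adminq and hw->mailboxq in struct ice_hw (see ice_type.h).
 */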

#endif /* _ICE_CONTROLQ_H_ */