xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/xdp.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define	NICVF_INTR_ID_CQ		0
#define	NICVF_INTR_ID_SQ		8
#define	NICVF_INTR_ID_RBDR		16
#define	NICVF_INTR_ID_MISC		18
#define	NICVF_INTR_ID_QS_ERR		19

#define	for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define	for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define	for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
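
/* A minimal usage sketch: each helper walks one contiguous interrupt ID
 * range, e.g. for_each_cq_irq(irq) iterates irq over 0..7, the per-CQ
 * interrupt IDs ("handle_cq_irq" below is a hypothetical handler):
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		handle_cq_irq(nic, irq);
 */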

#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */
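
/* RBDR_SIZEn is a log2 encoding with an 8K base: ring entries =
 * 8192 << n, e.g. RBDR_SIZE2 -> 8192 << 2 = 32K entries. This matches
 * RCV_BUF_COUNT below, which computes 1ULL << (RBDR_SIZE + 13).
 */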

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */
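
/* SND/CMP queue sizes use the same log2 encoding with a 1K base:
 * ring entries = 1024 << n, e.g. SND_QUEUE_SIZE3 -> 1024 << 3 = 8K
 * entries, matching the 1ULL << (size + 10) length macros below.
 */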

/* Default queue counts per QS, queue lengths and threshold values */
#define DEFAULT_RBDR_CNT	1

#define SND_QSIZE		SND_QUEUE_SIZE0
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* Timestamping is not enabled; otherwise this would be 2 */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
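
/* The "~2usec" note implies one timer unit is roughly 25ns
 * (80 units * ~25ns = ~2usec); the exact tick is hardware-defined.
 */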

/* Number of CQEs that the HW may consume anyway due to pipelining
 * effects, irrespective of the PASS/DROP levels configured
 */
#define CMP_QUEUE_PIPELINE_RSVD 544

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
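
/* RCV_FRAG_LEN sizes one receive fragment so an skb can be built
 * directly over it (build_skb() style): the DMA buffer plus
 * NET_SKB_PAD headroom, followed by room for the skb_shared_info
 * placed at the end, each rounded up by SKB_DATA_ALIGN().
 */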

#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
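
/* Worst case for TX completions: an SQ full of minimal packets, each
 * taking MIN_SQ_DESC_PER_PKT_XMIT (2) descriptors and producing at most
 * MAX_CQE_PER_PKT_XMIT (1) CQE, i.e. 1024 / 2 * 1 = 512 CQEs with the
 * default 1K SND_QUEUE_LEN.
 */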

/* RED and Backpressure levels of CQ for pkt reception
 * For CQ, level is a measure of emptiness i.e. 0x0 means full
 * eg: For CQ of size 4K, and for pass/drop levels of 160/144
 * HW accepts pkt if unused CQE >= 2560
 * RED accepts pkt if unused CQE < 2560 & >= 2304
 * DROPs pkts if unused CQE < 2304
 */
#define RQ_PASS_CQ_LVL         224ULL
#define RQ_DROP_CQ_LVL         216ULL
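
/* Assuming the same 16-CQE granularity as the example above, the
 * configured 224/216 levels mean: accept while unused CQEs >= 3584,
 * RED between 3456 and 3584, drop below 3456.
 */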

/* RED and Backpressure levels of RBDR for pkt reception
 * For RBDR, level is a measure of fullness i.e. 0x0 means empty
 * eg: For RBDR of size 8K, and for pass/drop levels of 4/0
 * HW accepts pkt if unused RBs >= 256
 * RED accepts pkt if unused RBs < 256 & >= 0
 * DROPs pkts if unused RBs < 0 (i.e. never, with a drop level of 0)
 */
#define RQ_PASS_RBDR_LVL	8ULL
#define RQ_DROP_RBDR_LVL	0ULL
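
/* By the example's 64-RB granularity, a pass level of 8 means packets
 * are accepted while at least 512 receive buffers remain unused.
 */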

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
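
/* NICVF_ALIGNED_ADDR() rounds an address up to the next ALIGN_BYTES
 * boundary, e.g. NICVF_ALIGNED_ADDR(addr, NICVF_CQ_BASE_ALIGN_BYTES)
 * yields a 512-byte aligned CQ base address.
 */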

/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u32		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct pgcache {
	struct page	*page;
	int		ref_count;
	u64		dma_addr;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;		/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem   dmem;
	bool		is_xdp;

	/* For page recycling */
	int		pgidx;
	int		pgcnt;
	int		pgalloc;
	struct pgcache	*pgcache;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct	rbdr	*rbdr_start;
	struct	rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
	u8		cq_idx; /* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	u8		caching;
	struct		rx_tx_queue_stats stats;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;  /* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem   dmem;
	int		irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;
	u64		*xdp_page;
	u16		xdp_desc_cnt;
	u16		xdp_free_cnt;
	bool		is_xdp;

	/* For TSO segment's header */
	char		*tso_hdrs;
	dma_addr_t	tso_hdrs_phys;

	cpumask_t	affinity_mask;
	struct q_desc_mem   dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))
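
/* Each GET_*_DESC() macro casts the ring's contiguous descriptor memory
 * to the right element type and returns a pointer to entry 'idx'.
 * A minimal sketch (assuming a valid ring and in-range index):
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 */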

/* CQ status bits */
#define	CQ_WR_FULL	BIT(26)
#define	CQ_WR_DISABLE	BIT(25)
#define	CQ_WR_FAULT	BIT(24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}
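
/* Descriptors store DMA (IOVA) addresses; behind an IOMMU these must be
 * translated back to physical addresses before the driver can derive a
 * kernel virtual address from them (e.g. via phys_to_virt()).
 */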

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64  nicvf_queue_reg_read(struct nicvf *nic,
			  u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */