xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/iavf/iavf_txrx.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160 usec, which is then called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define IAVF_ITR_100K		    10	/* all values below must be even */
#define IAVF_ITR_50K		    20
#define IAVF_ITR_20K		    50
#define IAVF_ITR_18K		    60
#define IAVF_ITR_8K		   122
#define IAVF_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
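
/* Illustrative sketch only, not part of the driver: how the macros above
 * compose.  A stored setting such as IAVF_ITR_RX_DEF carries the interval
 * in usecs plus the IAVF_ITR_DYNAMIC flag; ITR_TO_REG() strips the flag,
 * and the register itself then wants that interval divided by 2 because of
 * its 2 usec resolution.  The helper name below is hypothetical.
 */
static inline u16 iavf_example_itr_to_reg_units(u16 itr_setting)
{
	/* Drop the dynamic flag, keeping only the usec interval. */
	u16 usecs = ITR_TO_REG(itr_setting);

	/* The hardware counts in 2 usec units. */
	return usecs / 2;
}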

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */
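
/* Illustrative sketch only: round-tripping a rate limit through the
 * register encoding above.  8000 ints/sec corresponds to a 125 usec
 * interval; INTRL_USEC_TO_REG(125) yields (125 >> 2) with INTRL_ENA set,
 * and INTRL_REG_TO_USEC() then recovers 124 usec because the register only
 * has 4 usec resolution.  The helper name below is hypothetical.
 */
static inline u8 iavf_example_intrl_usecs_to_reg(u32 usecs)
{
	return INTRL_USEC_TO_REG(usecs);
}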

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR    IAVF_IDX_ITR0
#define IAVF_TX_ITR    IAVF_IDX_ITR1
#define IAVF_PE_ITR    IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
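
/* Illustrative sketch only: testing whether a single packet classifier
 * type is enabled in a hena bitmask such as IAVF_DEFAULT_RSS_HENA.  The
 * helper name is hypothetical.
 */
static inline bool iavf_example_pctype_enabled(u64 hena, u32 pctype)
{
	return !!(hena & BIT_ULL(pctype));
}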

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256   256
#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048  2048
#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * e.g. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * e.g. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int iavf_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IAVF_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IAVF_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
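
/* Illustrative sketch only, not part of the driver: with a build_skb()
 * style receive, a buffer holds IAVF_SKB_PAD of headroom, then the frame
 * data, then struct skb_shared_info, so the usable payload for a buffer of
 * a given size is roughly what is computed below.  The helper name is
 * hypothetical.
 */
static inline unsigned int iavf_example_usable_rx_data(unsigned int buf_size)
{
	return SKB_WITH_OVERHEAD(buf_size) - IAVF_SKB_PAD;
}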

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
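
/* Illustrative usage sketch only: the Rx cleanup path typically checks the
 * descriptor-done (DD) bit this way before trusting any other field of the
 * write-back descriptor (IAVF_RX_DESC_STATUS_DD_SHIFT comes from
 * iavf_type.h).  The helper name is hypothetical.
 */
static inline bool iavf_example_rx_desc_done(union iavf_rx_desc *rx_desc)
{
	return iavf_test_staterr(rx_desc,
				 BIT_ULL(IAVF_RX_DESC_STATUS_DD_SHIFT));
}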

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))

/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
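
/* Worked example (illustrative only): a 32KB send maps to
 * ((32768 * 85) >> 20) + 1 = 2 + 1 = 3 descriptors, which matches
 * DIV_ROUND_UP(32768, 12K) = 3.
 */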

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN		BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN		BIT(2)
#define IAVF_TX_FLAGS_TSO		BIT(3)
#define IAVF_TX_FLAGS_IPV4		BIT(4)
#define IAVF_TX_FLAGS_IPV6		BIT(5)
#define IAVF_TX_FLAGS_FCCRC		BIT(6)
#define IAVF_TX_FLAGS_FSO		BIT(7)
#define IAVF_TX_FLAGS_FD_SB		BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
#define IAVF_TX_FLAGS_VLAN_MASK		0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IAVF_TX_FLAGS_VLAN_SHIFT	16

struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};
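
/* Illustrative sketch only: how the DMA unmap cookies declared in
 * struct iavf_tx_buffer are typically recorded when a buffer is mapped,
 * so that the cleanup path can later pass dma_unmap_addr()/dma_unmap_len()
 * back to the DMA API.  The helper name is hypothetical.
 */
static inline void iavf_example_record_tx_dma(struct iavf_tx_buffer *tx_buf,
					      dma_addr_t mapping,
					      unsigned int size)
{
	dma_unmap_addr_set(tx_buf, dma, mapping);
	dma_unmap_len_set(tx_buf, len, size);
}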

struct iavf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum iavf_ring_state_t {
	__IAVF_TX_FDIR_INIT_DONE,
	__IAVF_TX_XPS_INIT_DONE,
	__IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT      0
#define IAVF_RX_DTYPE_HEADER_SPLIT  1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct iavf_tx_buffer *tx_bi;
		struct iavf_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)

	/* stats structs */
	struct iavf_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;
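
/* Illustrative sketch only, not part of the driver: the usual way a ring's
 * tail register (the 'tail' member above) is bumped after new descriptors
 * have been written.  The write barrier orders the descriptor memory
 * writes ahead of the doorbell.  The helper name is hypothetical.
 */
static inline void iavf_example_bump_tail(struct iavf_ring *ring, u32 val)
{
	wmb();	/* descriptor writes must be visible before the tail write */
	writel(val, ring->tail);
}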

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
	return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
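
/* Illustrative usage sketch only: walking every ring attached to a ring
 * container, for example to total the byte counts from a poll routine.
 * The helper name is hypothetical.
 */
static inline u64 iavf_example_container_bytes(struct iavf_ring_container *rc)
{
	struct iavf_ring *ring;
	u64 bytes = 0;

	iavf_for_each_ring(ring, *rc)
		bytes += ring->stats.bytes;

	return bytes;
}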

static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}
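
/* Illustrative sketch only of how the helpers above compose on the
 * transmit fast path: count the descriptors an skb needs, linearize it
 * when the fragment layout cannot be handled, then make sure the ring has
 * room (the extra slack added to the count here is illustrative, not the
 * driver's exact value).  The helper name is hypothetical and error
 * handling is elided.
 */
static inline int iavf_example_tx_prep(struct iavf_ring *tx_ring,
				       struct sk_buff *skb)
{
	int count = iavf_xmit_descriptor_count(skb);

	if (iavf_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			return -ENOMEM;
		count = iavf_txd_use_count(skb->len);
	}

	if (iavf_maybe_stop_tx(tx_ring, count + 4))
		return -EBUSY;

	return count;
}
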
/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */