xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/stmicro/stmmac/ring_mode.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*******************************************************************************
3*4882a593Smuzhiyun   Specialised functions for managing Ring mode
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun   Copyright(C) 2011  STMicroelectronics Ltd
6*4882a593Smuzhiyun 
  It defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in ring mode.
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
13*4882a593Smuzhiyun *******************************************************************************/
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "stmmac.h"
16*4882a593Smuzhiyun 
/* jumbo_frm - map the linear (head) data of a jumbo skb in RING mode.
 * @p: the TX queue, passed as void * through the mode-ops interface
 * @skb: socket buffer whose linear data exceeds a single buffer
 * @csum: checksum-insertion flag forwarded to the descriptor setup
 *
 * In ring mode every descriptor addresses two buffers (des2 and des3),
 * so the head data is spread across one descriptor, or across two
 * descriptors when it is larger than 8KiB.
 *
 * Returns the index of the last descriptor used, or -1 if a DMA
 * mapping failed (earlier mappings are left for the caller's error
 * path to dispose of).
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Enhanced descriptors carry up to 8KiB per descriptor, normal
	 * descriptors only 2KiB. */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Remainder for a second descriptor; only used (and only
	 * meaningful) in the two-descriptor branch below. */
	len = nopaged_len - bmax;

	if (nopaged_len > BUF_SIZE_8KiB) {

		/* First descriptor: map the first bmax bytes.  des3
		 * points 4KiB into the same mapping so the descriptor's
		 * two buffer pointers cover it back to back. */
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		/* is_jumbo tells clean_desc3() to clear des3 on reclaim */
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		/* First segment, not last; OWN handling for the first
		 * descriptor is deferred to the caller (stmmac_xmit). */
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				STMMAC_RING_MODE, 0, false, skb->len);
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);

		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		/* Second descriptor: map the remaining len bytes. */
		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		/* Last segment only if the skb has no paged fragments. */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				skb->len);
	} else {
		/* The whole head fits in one descriptor's two buffers. */
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
				skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}
93*4882a593Smuzhiyun 
is_jumbo_frm(int len,int enh_desc)94*4882a593Smuzhiyun static unsigned int is_jumbo_frm(int len, int enh_desc)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun 	unsigned int ret = 0;
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	if (len >= BUF_SIZE_4KiB)
99*4882a593Smuzhiyun 		ret = 1;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	return ret;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
refill_desc3(void * priv_ptr,struct dma_desc * p)104*4882a593Smuzhiyun static void refill_desc3(void *priv_ptr, struct dma_desc *p)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = priv_ptr;
107*4882a593Smuzhiyun 	struct stmmac_priv *priv = rx_q->priv_data;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	/* Fill DES3 in case of RING mode */
110*4882a593Smuzhiyun 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
111*4882a593Smuzhiyun 		p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /* In ring mode we need to fill the desc3 because it is used as buffer */
init_desc3(struct dma_desc * p)115*4882a593Smuzhiyun static void init_desc3(struct dma_desc *p)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
clean_desc3(void * priv_ptr,struct dma_desc * p)120*4882a593Smuzhiyun static void clean_desc3(void *priv_ptr, struct dma_desc *p)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
123*4882a593Smuzhiyun 	struct stmmac_priv *priv = tx_q->priv_data;
124*4882a593Smuzhiyun 	unsigned int entry = tx_q->dirty_tx;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	/* des3 is only used for jumbo frames tx or time stamping */
127*4882a593Smuzhiyun 	if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
128*4882a593Smuzhiyun 		     (tx_q->tx_skbuff_dma[entry].last_segment &&
129*4882a593Smuzhiyun 		      !priv->extend_desc && priv->hwts_tx_en)))
130*4882a593Smuzhiyun 		p->des3 = 0;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
set_16kib_bfsize(int mtu)133*4882a593Smuzhiyun static int set_16kib_bfsize(int mtu)
134*4882a593Smuzhiyun {
135*4882a593Smuzhiyun 	int ret = 0;
136*4882a593Smuzhiyun 	if (unlikely(mtu > BUF_SIZE_8KiB))
137*4882a593Smuzhiyun 		ret = BUF_SIZE_16KiB;
138*4882a593Smuzhiyun 	return ret;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun 
/* RING-mode implementation of the descriptor-management callbacks;
 * selected by the core driver when chain mode is not in use. */
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};
149