xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/stmicro/stmmac/chain_mode.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*******************************************************************************
3*4882a593Smuzhiyun   Specialised functions for managing Chained mode
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun   Copyright(C) 2011  STMicroelectronics Ltd
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun   It defines all the functions used to handle the normal/enhanced
8*4882a593Smuzhiyun   descriptors in case of the DMA is configured to work in chained or
9*4882a593Smuzhiyun   in ring mode.
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
13*4882a593Smuzhiyun *******************************************************************************/
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "stmmac.h"
16*4882a593Smuzhiyun 
/**
 * jumbo_frm - map an oversized linear skb across several chained descriptors
 * @p: opaque pointer to the TX queue (struct stmmac_tx_queue)
 * @skb: socket buffer to transmit; only the linear head is handled here
 * @csum: checksum-insertion flag, passed through to descriptor setup
 *
 * Splits skb_headlen(skb) bytes into bmax-sized chunks (8KiB with enhanced
 * descriptors, 2KiB with normal ones), DMA-maps each chunk and programs one
 * TX descriptor per chunk.  The first descriptor is prepared without the OWN
 * bit so the caller can release the whole chain to the DMA engine atomically.
 *
 * Return: index of the last descriptor used, or -1 on DMA mapping failure.
 * NOTE(review): on a mapping failure mid-chain the chunks already mapped are
 * not unmapped here — presumably the caller's error path cleans them up;
 * confirm against the stmmac xmit error handling.
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, des2;
	unsigned int i = 1, len;
	struct dma_desc *desc;

	desc = tx_q->dma_tx + entry;

	/* Per-descriptor buffer limit depends on the descriptor layout. */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Bytes remaining after the first chunk.  NOTE(review): assumes the
	 * caller only invokes this when nopaged_len > bmax (see
	 * is_jumbo_frm()); otherwise this unsigned subtraction underflows.
	 */
	len = nopaged_len - bmax;

	des2 = dma_map_single(priv->device, skb->data,
			      bmax, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = bmax;
	/* do not close the descriptor and do not set own bit */
	stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
			0, false, skb->len);

	while (len != 0) {
		/* Only the final descriptor will carry the skb reference;
		 * intermediate entries are explicitly cleared.
		 */
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			/* Middle chunk: full bmax bytes, OWN bit set,
			 * not the last segment.
			 */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
					STMMAC_CHAIN_MODE, 1, false, skb->len);
			len -= bmax;
			i++;
		} else {
			/* Final chunk: remaining bytes only. */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
			/* last descriptor can be set now */
			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
					STMMAC_CHAIN_MODE, 1, true, skb->len);
			len = 0;
		}
	}

	tx_q->cur_tx = entry;

	return entry;
}
85*4882a593Smuzhiyun 
is_jumbo_frm(int len,int enh_desc)86*4882a593Smuzhiyun static unsigned int is_jumbo_frm(int len, int enh_desc)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	unsigned int ret = 0;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
91*4882a593Smuzhiyun 	    (!enh_desc && (len > BUF_SIZE_2KiB))) {
92*4882a593Smuzhiyun 		ret = 1;
93*4882a593Smuzhiyun 	}
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	return ret;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun 
/* Link every descriptor of the ring to its successor through des3:
 * element i holds the physical address of element i + 1, and the last
 * element points back to the head (@phy_addr), closing the chain.
 */
static void init_dma_chain(void *des, dma_addr_t phy_addr,
				  unsigned int size, unsigned int extend_desc)
{
	dma_addr_t next = phy_addr;
	int i;

	if (extend_desc) {
		struct dma_extended_desc *ep = (struct dma_extended_desc *)des;

		for (i = 0; i < (size - 1); i++, ep++) {
			next += sizeof(struct dma_extended_desc);
			ep->basic.des3 = cpu_to_le32((unsigned int)next);
		}
		/* Close the loop: last element points at the head. */
		ep->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
	} else {
		struct dma_desc *dp = (struct dma_desc *)des;

		for (i = 0; i < (size - 1); i++, dp++) {
			next += sizeof(struct dma_desc);
			dp->des3 = cpu_to_le32((unsigned int)next);
		}
		/* Close the loop: last element points at the head. */
		dp->des3 = cpu_to_le32((unsigned int)phy_addr);
	}
}
127*4882a593Smuzhiyun 
refill_desc3(void * priv_ptr,struct dma_desc * p)128*4882a593Smuzhiyun static void refill_desc3(void *priv_ptr, struct dma_desc *p)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
131*4882a593Smuzhiyun 	struct stmmac_priv *priv = rx_q->priv_data;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	if (priv->hwts_rx_en && !priv->extend_desc)
134*4882a593Smuzhiyun 		/* NOTE: Device will overwrite des3 with timestamp value if
135*4882a593Smuzhiyun 		 * 1588-2002 time stamping is enabled, hence reinitialize it
136*4882a593Smuzhiyun 		 * to keep explicit chaining in the descriptor.
137*4882a593Smuzhiyun 		 */
138*4882a593Smuzhiyun 		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
139*4882a593Smuzhiyun 				      (((rx_q->dirty_rx) + 1) %
140*4882a593Smuzhiyun 				       priv->dma_rx_size) *
141*4882a593Smuzhiyun 				      sizeof(struct dma_desc)));
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun 
clean_desc3(void * priv_ptr,struct dma_desc * p)144*4882a593Smuzhiyun static void clean_desc3(void *priv_ptr, struct dma_desc *p)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
147*4882a593Smuzhiyun 	struct stmmac_priv *priv = tx_q->priv_data;
148*4882a593Smuzhiyun 	unsigned int entry = tx_q->dirty_tx;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
151*4882a593Smuzhiyun 	    priv->hwts_tx_en)
152*4882a593Smuzhiyun 		/* NOTE: Device will overwrite des3 with timestamp value if
153*4882a593Smuzhiyun 		 * 1588-2002 time stamping is enabled, hence reinitialize it
154*4882a593Smuzhiyun 		 * to keep explicit chaining in the descriptor.
155*4882a593Smuzhiyun 		 */
156*4882a593Smuzhiyun 		p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
157*4882a593Smuzhiyun 				      ((tx_q->dirty_tx + 1) %
158*4882a593Smuzhiyun 				       priv->dma_tx_size))
159*4882a593Smuzhiyun 				      * sizeof(struct dma_desc)));
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun 
/* Descriptor-management callbacks used by the stmmac core when the DMA
 * is configured for explicit descriptor chaining (as opposed to ring
 * mode); see the ring-mode counterpart for the alternative layout.
 */
const struct stmmac_mode_ops chain_mode_ops = {
	.init = init_dma_chain,
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.clean_desc3 = clean_desc3,
};
169