// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"

#define PRESTERA_SDMA_WAIT_MUL		10

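/* One SDMA descriptor as seen by the hardware. The full bit layout of
 * word1/word2 is not spelled out here; the accessor macros below encode
 * only what this driver relies on (ownership in word1 bit 31, buffer
 * size and packet length in word2). buff and next hold device-view bus
 * addresses produced by prestera_sdma_map().
 */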
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX	1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN	1

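/* Ownership handshake: the driver passes a descriptor to the SDMA
 * engine by setting the owner bit to DMA_OWN; the engine flips it back
 * to CPU_OWN once the buffer has been filled (RX) or transmitted (TX).
 */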
#define PRESTERA_SDMA_RX_QUEUE_NUM	8

#define PRESTERA_SDMA_RX_DESC_PER_Q	1000

#define PRESTERA_SDMA_TX_DESC_PER_Q	1000
#define PRESTERA_SDMA_TX_MAX_BURST	64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN	1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE	\
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT	\
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

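/* SDMA register offsets, relative to the switch register space that
 * prestera_read()/prestera_write() operate on.
 */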
#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

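/* Host-side bookkeeping for one descriptor: the descriptor itself (and
 * its DMA address), the attached skb and its mapping. is_used is only
 * consulted on the TX path.
 */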
struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protect SDMA from concurrent access by multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

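/* Translate a host DMA address into the device's view of it; map_addr
 * is the base offset obtained from prestera_hw_rxtx_init().
 */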
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure the buffer is set before resetting the descriptor */
	wmb();

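	/* 0xA0000000 sets the owner bit (31), handing the descriptor to
	 * the DMA engine; the meaning of the other set bit (29) is an
	 * assumption (likely an interrupt-enable flag) -- the driver only
	 * ever writes this constant.
	 */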
	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

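/* Hand the filled skb up and re-arm the descriptor with a fresh buffer.
 * If the refill allocation fails, keep the original buffer on the ring
 * and return a copy of the received data instead (or NULL on OOM).
 */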
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

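/* Parse and strip the DSA tag from a received frame, resolve the source
 * port from the (device, port) ids carried in the tag, and populate the
 * VLAN offload info if the frame was tagged.
 */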
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	const struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* ethertype field is part of the dsa header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

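/* NAPI poll: service all RX queues round-robin until the budget is
 * spent or every queue reports no more completed descriptors. RX
 * interrupts stay masked while polling and are re-enabled only once
 * NAPI completes.
 */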
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

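/* Build one circular descriptor ring per RX queue, each descriptor
 * pre-armed with a freshly mapped skb, point the hardware at the ring
 * heads and enable the queues.
 */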
static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

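/* Deferred TX completion: walk the ring and release every buffer whose
 * descriptor the hardware has handed back (owner bit cleared). Runs
 * from a workqueue scheduled by the transmit path rather than from an
 * interrupt.
 */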
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}

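/* Build the single circular TX descriptor ring and point the hardware
 * at its head; buffers are attached later, one per transmitted skb.
 */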
static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

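/* Firmware event callback: on a "packet received" event, mask RX
 * interrupts and let NAPI take over polling.
 */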
static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

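/* The TX engine is ready for more work once the hardware has cleared
 * the queue-start bit.
 */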
static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

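/* TX fast path, serialized by tx_lock. Every max_burst frames the
 * driver polls the engine for readiness (prestera_sdma_tx_wait) as a
 * simple form of backpressure; completed buffers are reclaimed
 * asynchronously by the recycle work.
 */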
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);
	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;
	int err;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	err = prestera_sdma_switch_init(sw);
	if (err)
		kfree(rxtx);

	return err;
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	int err;

	err = prestera_hw_rxtx_port_init(port);
	if (err)
		return err;

	port->dev->needed_headroom = PRESTERA_DSA_HLEN;

	return 0;
}

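/* Entry point from the port netdev's xmit path: make room for and build
 * the DSA tag right after the MAC addresses, then hand the frame to the
 * SDMA engine.
 */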
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}