xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk-tool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
4  */
5 
6 #include <linux/ip.h>
7 #include <linux/tcp.h>
8 #include <linux/skbuff.h>
9 #include <linux/if_ether.h>
10 #include <linux/if.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/of_device.h>
13 #include <linux/slab.h>
14 #include <linux/prefetch.h>
15 #include <linux/regmap.h>
16 #include <linux/phy.h>
17 #include <linux/udp.h>
19 #include <net/pkt_cls.h>
20 #include <net/tcp.h>
21 #include <net/udp.h>
22 #include <linux/soc/rockchip/rk_vendor_storage.h>
23 #include "stmmac.h"
24 #include "dwmac1000.h"
25 #include "dwmac_dma.h"
26 #include "dwmac-rk-tool.h"
27 
28 enum {
29 	LOOPBACK_TYPE_GMAC = 1,
30 	LOOPBACK_TYPE_PHY
31 };
32 
33 enum {
34 	LOOPBACK_SPEED10 = 10,
35 	LOOPBACK_SPEED100 = 100,
36 	LOOPBACK_SPEED1000 = 1000
37 };
38 
39 struct dwmac_rk_packet_attrs {
40 	unsigned char src[6];
41 	unsigned char dst[6];
42 	u32 ip_src;
43 	u32 ip_dst;
44 	int tcp;
45 	int sport;
46 	int dport;
47 	int size;
48 };
49 
50 struct dwmac_rk_hdr {
51 	__be32 version;
52 	__be64 magic;
53 	u32 id;
54 	int tx;
55 	int rx;
56 } __packed;
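
/*
 * On-wire layout of a loopback test frame, as built by dwmac_rk_get_skb()
 * and checked by dwmac_rk_loopback_validate():
 *
 *   [ethhdr][iphdr][tcphdr or udphdr][dwmac_rk_hdr][attr->size random bytes]
 *
 * The magic/id/tx/rx fields let the receive path confirm that the frame
 * it captured is the one just sent for the delayline pair under test.
 */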
57 
58 struct dwmac_rk_lb_priv {
59 	/* descriptors and buffers */
60 	struct dma_desc *dma_tx;
61 	dma_addr_t dma_tx_phy;
62 	struct sk_buff *tx_skbuff;
63 	dma_addr_t tx_skbuff_dma;
64 	unsigned int tx_skbuff_dma_len;
65 
66 	struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
67 	dma_addr_t dma_rx_phy;
68 	struct sk_buff *rx_skbuff;
69 	dma_addr_t rx_skbuff_dma;
70 	u32 rx_tail_addr;
71 	u32 tx_tail_addr;
72 
73 	/* rx buffer size */
74 	unsigned int dma_buf_sz;
75 	unsigned int buf_sz;
76 
77 	int type;
78 	int speed;
79 	struct dwmac_rk_packet_attrs *packet;
80 
81 	unsigned int actual_size;
82 	int scan;
83 	int sysfs;
84 	u32 id;
85 	int tx;
86 	int rx;
87 	int final_tx;
88 	int final_rx;
89 	int max_delay;
90 };
91 
92 #define DMA_CONTROL_OSP		BIT(4)
93 #define DMA_CHAN_BASE_ADDR	0x00001100
94 #define DMA_CHAN_BASE_OFFSET	0x80
95 #define DMA_CHANX_BASE_ADDR(x)	(DMA_CHAN_BASE_ADDR + \
96 				((x) * DMA_CHAN_BASE_OFFSET))
97 #define DMA_CHAN_TX_CONTROL(x)	(DMA_CHANX_BASE_ADDR(x) + 0x4)
98 #define DMA_CHAN_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x60)
99 #define DMA_CHAN_STATUS_ERI	BIT(11)
100 #define DMA_CHAN_STATUS_ETI	BIT(10)
101 
102 #define	STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
103 #define MAX_DELAYLINE 0x7f
104 #define RK3588_MAX_DELAYLINE 0xc7
105 #define SCAN_STEP 0x5
106 #define SCAN_VALID_RANGE 0xA
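
/*
 * Delayline scan tuning: the cross scan probes every SCAN_STEP-th tap and
 * only accepts a passing window wider than SCAN_VALID_RANGE taps, so the
 * midpoint chosen from the window still has margin on both sides.
 */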
107 
108 #define DWMAC_RK_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
109 				sizeof(struct dwmac_rk_hdr))
110 #define DWMAC_RK_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
111 #define DWMAC_RK_TEST_PKT_MAX_SIZE 1500
112 
113 static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_udp_attr = {
114 	.dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
115 	.tcp = 0,
116 	.size = 1024,
117 };
118 
119 static __maybe_unused struct dwmac_rk_packet_attrs dwmac_rk_tcp_attr = {
120 	.dst = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
121 	.tcp = 1,
122 	.size = 1024,
123 };
124 
125 static int dwmac_rk_enable_mac_loopback(struct stmmac_priv *priv, int speed,
126 					int addr, bool phy)
127 {
128 	u32 ctrl;
129 	int phy_val;
130 
131 	ctrl = readl(priv->ioaddr + GMAC_CONTROL);
132 	ctrl &= ~priv->hw->link.speed_mask;
133 	ctrl |= GMAC_CONTROL_LM;
134 
135 	if (phy)
136 		phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
137 
138 	switch (speed) {
139 	case LOOPBACK_SPEED1000:
140 		ctrl |= priv->hw->link.speed1000;
141 		if (phy) {
142 			phy_val &= ~BMCR_SPEED100;
143 			phy_val |= BMCR_SPEED1000;
144 		}
145 		break;
146 	case LOOPBACK_SPEED100:
147 		ctrl |= priv->hw->link.speed100;
148 		if (phy) {
149 			phy_val &= ~BMCR_SPEED1000;
150 			phy_val |= BMCR_SPEED100;
151 		}
152 		break;
153 	case LOOPBACK_SPEED10:
154 		ctrl |= priv->hw->link.speed10;
155 		if (phy) {
156 			phy_val &= ~BMCR_SPEED1000;
157 			phy_val &= ~BMCR_SPEED100;
158 		}
159 		break;
160 	default:
161 		return -EPERM;
162 	}
163 
164 	ctrl |= priv->hw->link.duplex;
165 	writel(ctrl, priv->ioaddr + GMAC_CONTROL);
166 
167 	if (phy) {
168 		phy_val &= ~BMCR_PDOWN;
169 		phy_val &= ~BMCR_ANENABLE;
171 		phy_val |= BMCR_FULLDPLX;
172 		mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
173 		phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
174 	}
175 
176 	if (likely(priv->plat->fix_mac_speed))
177 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
178 
179 	return 0;
180 }
181 
182 static int dwmac_rk_disable_mac_loopback(struct stmmac_priv *priv, int addr)
183 {
184 	u32 ctrl;
185 	int phy_val;
186 
187 	ctrl = readl(priv->ioaddr + GMAC_CONTROL);
188 	ctrl &= ~GMAC_CONTROL_LM;
189 	writel(ctrl, priv->ioaddr + GMAC_CONTROL);
190 
191 	phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
192 	phy_val |= BMCR_ANENABLE;
193 
194 	mdiobus_write(priv->mii, addr, MII_BMCR, phy_val);
195 	phy_val = mdiobus_read(priv->mii, addr, MII_BMCR);
196 
197 	return 0;
198 }
199 
200 static int dwmac_rk_set_mac_loopback(struct stmmac_priv *priv,
201 				     int speed, bool enable,
202 				     int addr, bool phy)
203 {
204 	if (enable)
205 		return dwmac_rk_enable_mac_loopback(priv, speed, addr, phy);
206 	else
207 		return dwmac_rk_disable_mac_loopback(priv, addr);
208 }
209 
210 static int dwmac_rk_enable_phy_loopback(struct stmmac_priv *priv, int speed,
211 					int addr, bool phy)
212 {
213 	u32 ctrl;
214 	int val;
215 
216 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
217 	ctrl &= ~priv->hw->link.speed_mask;
218 
219 	if (phy)
220 		val = mdiobus_read(priv->mii, addr, MII_BMCR);
221 
222 	switch (speed) {
223 	case LOOPBACK_SPEED1000:
224 		ctrl |= priv->hw->link.speed1000;
225 		if (phy) {
226 			val &= ~BMCR_SPEED100;
227 			val |= BMCR_SPEED1000;
228 		}
229 		break;
230 	case LOOPBACK_SPEED100:
231 		ctrl |= priv->hw->link.speed100;
232 		if (phy) {
233 			val &= ~BMCR_SPEED1000;
234 			val |= BMCR_SPEED100;
235 		}
236 		break;
237 	case LOOPBACK_SPEED10:
238 		ctrl |= priv->hw->link.speed10;
239 		if (phy) {
240 			val &= ~BMCR_SPEED1000;
241 			val &= ~BMCR_SPEED100;
242 		}
243 		break;
244 	default:
245 		return -EPERM;
246 	}
247 
248 	ctrl |= priv->hw->link.duplex;
249 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
250 
251 	if (phy) {
252 		val |= BMCR_FULLDPLX;
253 		val &= ~BMCR_PDOWN;
254 		val &= ~BMCR_ANENABLE;
255 		val |= BMCR_LOOPBACK;
256 		mdiobus_write(priv->mii, addr, MII_BMCR, val);
257 		val = mdiobus_read(priv->mii, addr, MII_BMCR);
258 	}
259 
260 	if (likely(priv->plat->fix_mac_speed))
261 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
262 
263 	return 0;
264 }
265 
266 static int dwmac_rk_disable_phy_loopback(struct stmmac_priv *priv, int addr)
267 {
268 	int val;
269 
270 	val = mdiobus_read(priv->mii, addr, MII_BMCR);
271 	val |= BMCR_ANENABLE;
272 	val &= ~BMCR_LOOPBACK;
273 
274 	mdiobus_write(priv->mii, addr, MII_BMCR, val);
275 	val = mdiobus_read(priv->mii, addr, MII_BMCR);
276 
277 	return 0;
278 }
279 
280 static int dwmac_rk_set_phy_loopback(struct stmmac_priv *priv,
281 				     int speed, bool enable,
282 				     int addr, bool phy)
283 {
284 	if (enable)
285 		return dwmac_rk_enable_phy_loopback(priv, speed,
286 						     addr, phy);
287 	else
288 		return dwmac_rk_disable_phy_loopback(priv, addr);
289 }
290 
291 static int dwmac_rk_set_loopback(struct stmmac_priv *priv,
292 				 int type, int speed, bool enable,
293 				 int addr, bool phy)
294 {
295 	int ret;
296 
297 	switch (type) {
298 	case LOOPBACK_TYPE_PHY:
299 		ret = dwmac_rk_set_phy_loopback(priv, speed, enable, addr, phy);
300 		break;
301 	case LOOPBACK_TYPE_GMAC:
302 		ret = dwmac_rk_set_mac_loopback(priv, speed, enable, addr, phy);
303 		break;
304 	default:
305 		ret = -EOPNOTSUPP;
306 	}
307 
308 	usleep_range(100000, 200000);
309 	return ret;
310 }
311 
312 static inline void dwmac_rk_ether_addr_copy(u8 *dst, const u8 *src)
313 {
314 	u16 *a = (u16 *)dst;
315 	const u16 *b = (const u16 *)src;
316 
317 	a[0] = b[0];
318 	a[1] = b[1];
319 	a[2] = b[2];
320 }
321 
322 static void dwmac_rk_udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
323 {
324 	struct udphdr *uh = udp_hdr(skb);
325 	int offset = skb_transport_offset(skb);
326 	int len = skb->len - offset;
327 
328 	skb->csum_start = skb_transport_header(skb) - skb->head;
329 	skb->csum_offset = offsetof(struct udphdr, check);
330 	uh->check = ~csum_tcpudp_magic(src, dst, len,
331 				       IPPROTO_UDP, 0);
332 }
333 
334 static struct sk_buff *dwmac_rk_get_skb(struct stmmac_priv *priv,
335 					struct dwmac_rk_lb_priv *lb_priv)
336 {
337 	struct sk_buff *skb = NULL;
338 	struct udphdr *uhdr = NULL;
339 	struct tcphdr *thdr = NULL;
340 	struct dwmac_rk_hdr *shdr;
341 	struct ethhdr *ehdr;
342 	struct iphdr *ihdr;
343 	struct dwmac_rk_packet_attrs *attr;
344 	int iplen, size, nfrags;
345 
346 	attr = lb_priv->packet;
347 	size = attr->size + DWMAC_RK_TEST_PKT_SIZE;
348 	if (attr->tcp)
349 		size += sizeof(struct tcphdr);
350 	else
351 		size += sizeof(struct udphdr);
352 
353 	if (size >= DWMAC_RK_TEST_PKT_MAX_SIZE)
354 		return NULL;
355 
356 	lb_priv->actual_size = size;
357 
358 	skb = netdev_alloc_skb_ip_align(priv->dev, size);
359 	if (!skb)
360 		return NULL;
361 
362 	skb_linearize(skb);
363 	nfrags = skb_shinfo(skb)->nr_frags;
364 	if (nfrags > 0) {
365 		pr_err("%s: TX nfrags is not zero\n", __func__);
366 		dev_kfree_skb(skb);
367 		return NULL;
368 	}
369 
370 	ehdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
371 	skb_reset_mac_header(skb);
372 
373 	skb_set_network_header(skb, skb->len);
374 	ihdr = (struct iphdr *)skb_put(skb, sizeof(*ihdr));
375 
376 	skb_set_transport_header(skb, skb->len);
377 	if (attr->tcp)
378 		thdr = (struct tcphdr *)skb_put(skb, sizeof(*thdr));
379 	else
380 		uhdr = (struct udphdr *)skb_put(skb, sizeof(*uhdr));
381 
382 	eth_zero_addr(ehdr->h_source);
383 	eth_zero_addr(ehdr->h_dest);
384 
385 	dwmac_rk_ether_addr_copy(ehdr->h_source, priv->dev->dev_addr);
386 	dwmac_rk_ether_addr_copy(ehdr->h_dest, attr->dst);
387 
388 	ehdr->h_proto = htons(ETH_P_IP);
389 
390 	if (attr->tcp) {
391 		if (!thdr) {
392 			dev_kfree_skb(skb);
393 			return NULL;
394 		}
395 
396 		thdr->source = htons(attr->sport);
397 		thdr->dest = htons(attr->dport);
398 		thdr->doff = sizeof(struct tcphdr) / 4;
399 		thdr->check = 0;
400 	} else {
401 		if (!uhdr) {
402 			dev_kfree_skb(skb);
403 			return NULL;
404 		}
405 
406 		uhdr->source = htons(attr->sport);
407 		uhdr->dest = htons(attr->dport);
408 		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
409 		uhdr->check = 0;
410 	}
411 
412 	ihdr->ihl = 5;
413 	ihdr->ttl = 32;
414 	ihdr->version = 4;
415 	if (attr->tcp)
416 		ihdr->protocol = IPPROTO_TCP;
417 	else
418 		ihdr->protocol = IPPROTO_UDP;
419 
420 	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
421 	if (attr->tcp)
422 		iplen += sizeof(*thdr);
423 	else
424 		iplen += sizeof(*uhdr);
425 
426 	ihdr->tot_len = htons(iplen);
427 	ihdr->frag_off = 0;
428 	ihdr->saddr = htonl(attr->ip_src);
429 	ihdr->daddr = htonl(attr->ip_dst);
430 	ihdr->tos = 0;
431 	ihdr->id = 0;
432 	ip_send_check(ihdr);
433 
434 	shdr = (struct dwmac_rk_hdr *)skb_put(skb, sizeof(*shdr));
435 	shdr->version = 0;
436 	shdr->magic = cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC);
437 	shdr->id = lb_priv->id;
438 	shdr->tx = lb_priv->tx;
439 	shdr->rx = lb_priv->rx;
440 
441 	if (attr->size) {
442 		skb_put(skb, attr->size);
443 		get_random_bytes((u8 *)shdr + sizeof(*shdr), attr->size);
444 	}
445 
446 	skb->csum = 0;
447 	skb->ip_summed = CHECKSUM_PARTIAL;
448 	if (attr->tcp) {
449 		if (!thdr) {
450 			dev_kfree_skb(skb);
451 			return NULL;
452 		}
453 
454 		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr,
455 					    ihdr->daddr, 0);
456 		skb->csum_start = skb_transport_header(skb) - skb->head;
457 		skb->csum_offset = offsetof(struct tcphdr, check);
458 	} else {
459 		dwmac_rk_udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
460 	}
461 
462 	skb->protocol = htons(ETH_P_IP);
463 	skb->pkt_type = PACKET_HOST;
464 
465 	return skb;
466 }
467 
468 static int dwmac_rk_loopback_validate(struct stmmac_priv *priv,
469 				      struct dwmac_rk_lb_priv *lb_priv,
470 				      struct sk_buff *skb)
471 {
472 	struct dwmac_rk_hdr *shdr;
473 	struct ethhdr *ehdr;
474 	struct udphdr *uhdr;
475 	struct tcphdr *thdr;
476 	struct iphdr *ihdr;
477 	int ret = -EAGAIN;
478 
479 	if (skb->len >= DWMAC_RK_TEST_PKT_MAX_SIZE)
480 		goto out;
481 
482 	if (lb_priv->actual_size != skb->len)
483 		goto out;
484 
485 	ehdr = (struct ethhdr *)(skb->data);
486 	if (!ether_addr_equal(ehdr->h_dest, lb_priv->packet->dst))
487 		goto out;
488 
489 	if (!ether_addr_equal(ehdr->h_source, priv->dev->dev_addr))
490 		goto out;
491 
492 	ihdr = (struct iphdr *)(skb->data + ETH_HLEN);
493 
494 	if (lb_priv->packet->tcp) {
495 		if (ihdr->protocol != IPPROTO_TCP)
496 			goto out;
497 
498 		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
499 		if (thdr->dest != htons(lb_priv->packet->dport))
500 			goto out;
501 
502 		shdr = (struct dwmac_rk_hdr *)((u8 *)thdr + sizeof(*thdr));
503 	} else {
504 		if (ihdr->protocol != IPPROTO_UDP)
505 			goto out;
506 
507 		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
508 		if (uhdr->dest != htons(lb_priv->packet->dport))
509 			goto out;
510 
511 		shdr = (struct dwmac_rk_hdr *)((u8 *)uhdr + sizeof(*uhdr));
512 	}
513 
514 	if (shdr->magic != cpu_to_be64(DWMAC_RK_TEST_PKT_MAGIC))
515 		goto out;
516 
517 	if (lb_priv->id != shdr->id)
518 		goto out;
519 
520 	if (lb_priv->tx != shdr->tx || lb_priv->rx != shdr->rx)
521 		goto out;
522 
523 	ret = 0;
524 out:
525 	return ret;
526 }
527 
528 static inline int dwmac_rk_rx_fill(struct stmmac_priv *priv,
529 				   struct dwmac_rk_lb_priv *lb_priv)
530 {
531 	struct dma_desc *p;
532 	struct sk_buff *skb;
533 
534 	p = lb_priv->dma_rx;
535 	if (likely(!lb_priv->rx_skbuff)) {
536 		skb = netdev_alloc_skb_ip_align(priv->dev, lb_priv->buf_sz);
537 		if (unlikely(!skb))
538 			return -ENOMEM;
539 
540 		if (skb_linearize(skb)) {
541 			pr_err("%s: Rx skb linearize failed\n", __func__);
542 			lb_priv->rx_skbuff = NULL;
543 			dev_kfree_skb(skb);
544 			return -EPERM;
545 		}
546 
547 		lb_priv->rx_skbuff = skb;
548 		lb_priv->rx_skbuff_dma =
549 		    dma_map_single(priv->device, skb->data, lb_priv->dma_buf_sz,
550 				   DMA_FROM_DEVICE);
551 		if (dma_mapping_error(priv->device,
552 				      lb_priv->rx_skbuff_dma)) {
553 			pr_err("%s: Rx dma map failed\n", __func__);
554 			lb_priv->rx_skbuff = NULL;
555 			dev_kfree_skb(skb);
556 			return -EFAULT;
557 		}
558 
559 		stmmac_set_desc_addr(priv, p, lb_priv->rx_skbuff_dma);
560 		/* Fill DES3 in case of RING mode */
561 		if (lb_priv->dma_buf_sz == BUF_SIZE_16KiB)
562 			p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
563 	}
564 
565 	wmb();
566 	stmmac_set_rx_owner(priv, p, priv->use_riwt);
567 	wmb();
568 
569 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, lb_priv->rx_tail_addr, 0);
570 
571 	return 0;
572 }
573 
574 static void dwmac_rk_rx_clean(struct stmmac_priv *priv,
575 			      struct dwmac_rk_lb_priv *lb_priv)
576 {
577 	if (likely(lb_priv->rx_skbuff_dma)) {
578 		dma_unmap_single(priv->device,
579 				 lb_priv->rx_skbuff_dma,
580 				 lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
581 		lb_priv->rx_skbuff_dma = 0;
582 	}
583 
584 	if (likely(lb_priv->rx_skbuff)) {
585 		dev_consume_skb_any(lb_priv->rx_skbuff);
586 		lb_priv->rx_skbuff = NULL;
587 	}
588 }
589 
590 static int dwmac_rk_rx_validate(struct stmmac_priv *priv,
591 				struct dwmac_rk_lb_priv *lb_priv)
592 {
593 	struct dma_desc *p;
594 	struct sk_buff *skb;
595 	int coe = priv->hw->rx_csum;
596 	unsigned int frame_len;
597 
598 	p = lb_priv->dma_rx;
599 	skb = lb_priv->rx_skbuff;
600 	if (unlikely(!skb)) {
601 		pr_err("%s: Inconsistent Rx descriptor chain\n",
602 		       __func__);
603 		return -EINVAL;
604 	}
605 
606 	frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
607 	/* check that frame_len fits the preallocated memory */
608 	if (frame_len > lb_priv->dma_buf_sz) {
609 		pr_err("%s: frame_len too long: %d\n", __func__, frame_len);
610 		return -ENOMEM;
611 	}
612 
613 	frame_len -= ETH_FCS_LEN;
614 	prefetch(skb->data - NET_IP_ALIGN);
615 	skb_put(skb, frame_len);
616 	dma_unmap_single(priv->device,
617 			 lb_priv->rx_skbuff_dma,
618 			 lb_priv->dma_buf_sz,
619 			 DMA_FROM_DEVICE);
620 
621 	return dwmac_rk_loopback_validate(priv, lb_priv, skb);
622 }
623 
624 static int dwmac_rk_get_desc_status(struct stmmac_priv *priv,
625 				    struct dwmac_rk_lb_priv *lb_priv)
626 {
627 	struct dma_desc *txp, *rxp;
628 	int tx_status, rx_status;
629 
630 	txp = lb_priv->dma_tx;
631 	tx_status = priv->hw->desc->tx_status(&priv->dev->stats,
632 					      &priv->xstats, txp,
633 					      priv->ioaddr);
634 	/* Check if the descriptor is owned by the DMA */
635 	if (unlikely(tx_status & tx_dma_own))
636 		return -EBUSY;
637 
638 	rxp = lb_priv->dma_rx;
639 	/* read the status of the incoming frame */
640 	rx_status = priv->hw->desc->rx_status(&priv->dev->stats,
641 					      &priv->xstats, rxp);
642 	if (unlikely(rx_status & dma_own))
643 		return -EBUSY;
644 
645 	usleep_range(100, 150);
646 
647 	return 0;
648 }
649 
650 static void dwmac_rk_tx_clean(struct stmmac_priv *priv,
651 			      struct dwmac_rk_lb_priv *lb_priv)
652 {
653 	struct sk_buff *skb = lb_priv->tx_skbuff;
654 	struct dma_desc *p;
655 
656 	p = lb_priv->dma_tx;
657 
658 	if (likely(lb_priv->tx_skbuff_dma)) {
659 		dma_unmap_single(priv->device,
660 				 lb_priv->tx_skbuff_dma,
661 				 lb_priv->tx_skbuff_dma_len,
662 				 DMA_TO_DEVICE);
663 		lb_priv->tx_skbuff_dma = 0;
664 	}
665 
666 	if (likely(skb)) {
667 		dev_consume_skb_any(skb);
668 		lb_priv->tx_skbuff = NULL;
669 	}
670 
671 	priv->hw->desc->release_tx_desc(p, priv->mode);
672 }
673 
674 static int dwmac_rk_xmit(struct sk_buff *skb, struct net_device *dev,
675 			 struct dwmac_rk_lb_priv *lb_priv)
676 {
677 	struct stmmac_priv *priv = netdev_priv(dev);
678 	unsigned int nopaged_len = skb_headlen(skb);
679 	int csum_insertion = 0;
680 	struct dma_desc *desc;
681 	unsigned int des;
682 
683 	priv->hw->mac->reset_eee_mode(priv->hw);
684 
685 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
686 
687 	desc = lb_priv->dma_tx;
688 	lb_priv->tx_skbuff = skb;
689 
690 	des = dma_map_single(priv->device, skb->data,
691 			     nopaged_len, DMA_TO_DEVICE);
692 	if (dma_mapping_error(priv->device, des))
693 		goto dma_map_err;
694 	lb_priv->tx_skbuff_dma = des;
695 
696 	stmmac_set_desc_addr(priv, desc, des);
697 	lb_priv->tx_skbuff_dma_len = nopaged_len;
698 
699 	/* Prepare the first descriptor setting the OWN bit too */
700 	stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len,
701 			       csum_insertion, priv->mode, 1, 1,
702 			       skb->len);
703 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
704 
705 	lb_priv->tx_tail_addr = lb_priv->dma_tx_phy + sizeof(*desc);
706 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, lb_priv->tx_tail_addr, 0);
707 
708 	return 0;
709 
710 dma_map_err:
711 	pr_err("%s: Tx dma map failed\n", __func__);
712 	dev_kfree_skb(skb);
713 	return -EFAULT;
714 }
715 
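/*
 * One loopback pass: refill the single Rx descriptor, start the MAC and
 * DMA, transmit one test frame, then poll the DMA status for the early
 * Rx/Tx interrupt bits (ERI/ETI) and validate the received frame against
 * the one sent. Returns 0 only if the frame came back intact.
 */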
716 static int __dwmac_rk_loopback_run(struct stmmac_priv *priv,
717 				   struct dwmac_rk_lb_priv *lb_priv)
718 {
719 	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
720 	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
721 	struct sk_buff *tx_skb;
722 	u32 chan = 0;
723 	int ret = -EIO, delay;
724 	u32 status;
725 	bool finish = false;
726 
727 	if (lb_priv->speed == LOOPBACK_SPEED1000)
728 		delay = 10;
729 	else if (lb_priv->speed == LOOPBACK_SPEED100)
730 		delay = 20;
731 	else if (lb_priv->speed == LOOPBACK_SPEED10)
732 		delay = 50;
733 	else
734 		return -EPERM;
735 
736 	if (dwmac_rk_rx_fill(priv, lb_priv))
737 		return -ENOMEM;
738 
739 	/* Enable the MAC Rx/Tx */
740 	stmmac_mac_set(priv, priv->ioaddr, true);
741 
742 	for (chan = 0; chan < rx_channels_count; chan++)
743 		stmmac_start_rx(priv, priv->ioaddr, chan);
744 	for (chan = 0; chan < tx_channels_count; chan++)
745 		stmmac_start_tx(priv, priv->ioaddr, chan);
746 
747 	tx_skb = dwmac_rk_get_skb(priv, lb_priv);
748 	if (!tx_skb) {
749 		ret = -ENOMEM;
750 		goto stop;
751 	}
752 
753 	if (dwmac_rk_xmit(tx_skb, priv->dev, lb_priv)) {
754 		ret = -EFAULT;
755 		goto stop;
756 	}
757 
758 	do {
759 		usleep_range(100, 150);
760 		delay--;
761 		if (priv->plat->has_gmac4) {
762 			status = readl(priv->ioaddr + DMA_CHAN_STATUS(0));
763 			finish = (status & DMA_CHAN_STATUS_ERI) && (status & DMA_CHAN_STATUS_ETI);
764 		} else {
765 			status = readl(priv->ioaddr + DMA_STATUS);
766 			finish = (status & DMA_STATUS_ERI) && (status & DMA_STATUS_ETI);
767 		}
768 
769 		if (finish) {
770 			if (!dwmac_rk_get_desc_status(priv, lb_priv)) {
771 				ret = dwmac_rk_rx_validate(priv, lb_priv);
772 				break;
773 			}
774 		}
775 	} while (delay > 0);
776 	/* ack the handled status bits; both status registers are write-1-to-clear */
	if (priv->plat->has_gmac4)
		writel(status, priv->ioaddr + DMA_CHAN_STATUS(0));
	else
		writel((status & 0x1ffff), priv->ioaddr + DMA_STATUS);
777 
778 stop:
779 	for (chan = 0; chan < rx_channels_count; chan++)
780 		stmmac_stop_rx(priv, priv->ioaddr, chan);
781 	for (chan = 0; chan < tx_channels_count; chan++)
782 		stmmac_stop_tx(priv, priv->ioaddr, chan);
783 
784 	stmmac_mac_set(priv, priv->ioaddr, false);
785 	/* wait for the state machine to be disabled */
786 	usleep_range(100, 150);
787 
788 	dwmac_rk_tx_clean(priv, lb_priv);
789 	dwmac_rk_rx_clean(priv, lb_priv);
790 
791 	return ret;
792 }
793 
794 static int dwmac_rk_loopback_with_identify(struct stmmac_priv *priv,
795 					   struct dwmac_rk_lb_priv *lb_priv,
796 					   int tx, int rx)
797 {
798 	lb_priv->id++;
799 	lb_priv->tx = tx;
800 	lb_priv->rx = rx;
801 
802 	lb_priv->packet = &dwmac_rk_tcp_attr;
803 	dwmac_rk_set_rgmii_delayline(priv, tx, rx);
804 
805 	return __dwmac_rk_loopback_run(priv, lb_priv);
806 }
807 
808 static inline bool dwmac_rk_delayline_is_txvalid(struct dwmac_rk_lb_priv *lb_priv,
809 						 int tx)
810 {
811 	if (tx > 0 && tx < lb_priv->max_delay)
812 		return true;
813 	else
814 		return false;
815 }
816 
817 static inline bool dwmac_rk_delayline_is_valid(struct dwmac_rk_lb_priv *lb_priv,
818 					       int tx, int rx)
819 {
820 	if ((tx > 0 && tx < lb_priv->max_delay) &&
821 	    (rx > 0 && rx < lb_priv->max_delay))
822 		return true;
823 	else
824 		return false;
825 }
826 
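/*
 * Cross scan: step rx through the delayline range; for each rx, sweep tx
 * and record the passing window [tx_left, tx_right]. Once a window wider
 * than SCAN_VALID_RANGE is found, fix tx at its midpoint and sweep rx the
 * same way to find rx_mid; if the rx window is too narrow, restart from
 * the next rx offset. The (tx_mid, rx_mid) pair is the result.
 */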
827 static int dwmac_rk_delayline_scan_cross(struct stmmac_priv *priv,
828 					 struct dwmac_rk_lb_priv *lb_priv)
829 {
830 	int tx_left, tx_right, rx_up, rx_down;
831 	int i, j, tx_index, rx_index;
832 	int tx_mid = 0, rx_mid = 0;
833 
834 	/* initialization */
835 	tx_index = SCAN_STEP;
836 	rx_index = SCAN_STEP;
837 
838 re_scan:
839 	/* start from rx, based on experience */
840 	for (i = rx_index; i <= (lb_priv->max_delay - SCAN_STEP); i += SCAN_STEP) {
841 		tx_left = 0;
842 		tx_right = 0;
843 		tx_mid = 0;
844 
845 		for (j = tx_index; j <= (lb_priv->max_delay - SCAN_STEP);
846 		     j += SCAN_STEP) {
847 			if (!dwmac_rk_loopback_with_identify(priv,
848 			    lb_priv, j, i)) {
849 				if (!tx_left)
850 					tx_left = j;
851 				tx_right = j;
852 			}
853 		}
854 
855 		/* look for tx_mid */
856 		if ((tx_right - tx_left) > SCAN_VALID_RANGE) {
857 			tx_mid = (tx_right + tx_left) / 2;
858 			break;
859 		}
860 	}
861 
862 	/* Worst case: reached the end */
863 	if (i >= (lb_priv->max_delay - SCAN_STEP))
864 		goto end;
865 
866 	rx_up = 0;
867 	rx_down = 0;
868 
869 	/* look for rx_mid based on the tx_mid */
870 	for (i = SCAN_STEP; i <= (lb_priv->max_delay - SCAN_STEP);
871 	     i += SCAN_STEP) {
872 		if (!dwmac_rk_loopback_with_identify(priv, lb_priv,
873 		    tx_mid, i)) {
874 			if (!rx_up)
875 				rx_up = i;
876 			rx_down = i;
877 		}
878 	}
879 
880 	if ((rx_down - rx_up) > SCAN_VALID_RANGE) {
881 		/* Now get the rx_mid */
882 		rx_mid = (rx_up + rx_down) / 2;
883 	} else {
884 		rx_index += SCAN_STEP;
885 		rx_mid = 0;
886 		goto re_scan;
887 	}
888 
889 	if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
890 		lb_priv->final_tx = tx_mid;
891 		lb_priv->final_rx = rx_mid;
892 
893 		pr_info("Find available tx_delay = 0x%02x, rx_delay = 0x%02x\n",
894 			lb_priv->final_tx, lb_priv->final_rx);
895 
896 		return 0;
897 	}
898 end:
899 	pr_err("Can't find available delayline\n");
900 	return -ENXIO;
901 }
902 
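/*
 * Exhaustive scan (sysfs path): try every (tx, rx) pair, print a pass/fail
 * map ('O' marks a pass) and report the average of all passing points as
 * the suggested delayline. For RGMII_RXID only tx is scanned, with the rx
 * delayline left disabled (-1).
 */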
903 static int dwmac_rk_delayline_scan(struct stmmac_priv *priv,
904 				   struct dwmac_rk_lb_priv *lb_priv)
905 {
906 	int phy_iface = dwmac_rk_get_phy_interface(priv);
907 	int tx, rx, tx_sum, rx_sum, count;
908 	int tx_mid, rx_mid;
909 	int ret = -ENXIO;
910 
911 	tx_sum = 0;
912 	rx_sum = 0;
913 	count = 0;
914 
915 	for (rx = 0x0; rx <= lb_priv->max_delay; rx++) {
916 		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
917 			rx = -1;
918 		printk(KERN_CONT "RX(%03d):", rx);
919 		for (tx = 0x0; tx <= lb_priv->max_delay; tx++) {
920 			if (!dwmac_rk_loopback_with_identify(priv,
921 			    lb_priv, tx, rx)) {
922 				tx_sum += tx;
923 				rx_sum += rx;
924 				count++;
925 				printk(KERN_CONT "O");
926 			} else {
927 				printk(KERN_CONT " ");
928 			}
929 		}
930 		printk(KERN_CONT "\n");
931 
932 		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
933 			break;
934 	}
935 
936 	if (tx_sum && rx_sum && count) {
937 		tx_mid = tx_sum / count;
938 		rx_mid = rx_sum / count;
939 
940 		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID) {
941 			if (dwmac_rk_delayline_is_txvalid(lb_priv, tx_mid)) {
942 				lb_priv->final_tx = tx_mid;
943 				lb_priv->final_rx = -1;
944 				ret = 0;
945 			}
946 		} else {
947 			if (dwmac_rk_delayline_is_valid(lb_priv, tx_mid, rx_mid)) {
948 				lb_priv->final_tx = tx_mid;
949 				lb_priv->final_rx = rx_mid;
950 				ret = 0;
951 			}
952 		}
953 	}
954 
955 	if (ret) {
956 		pr_err("\nCan't find suitable delayline\n");
957 	} else {
958 		if (phy_iface == PHY_INTERFACE_MODE_RGMII_RXID)
959 			pr_info("Find available tx_delay = 0x%02x, rx_delay = disable\n",
960 				lb_priv->final_tx);
961 		else
962 			pr_info("\nFind suitable tx_delay = 0x%02x, rx_delay = 0x%02x\n",
963 				lb_priv->final_tx, lb_priv->final_rx);
964 	}
965 
966 	return ret;
967 }
968 
969 static int dwmac_rk_loopback_delayline_scan(struct stmmac_priv *priv,
970 					    struct dwmac_rk_lb_priv *lb_priv)
971 {
972 	if (lb_priv->sysfs)
973 		return dwmac_rk_delayline_scan(priv, lb_priv);
974 	else
975 		return dwmac_rk_delayline_scan_cross(priv, lb_priv);
976 }
977 
978 static void dwmac_rk_dma_free_rx_skbufs(struct stmmac_priv *priv,
979 					struct dwmac_rk_lb_priv *lb_priv)
980 {
981 	if (lb_priv->rx_skbuff) {
982 		dma_unmap_single(priv->device, lb_priv->rx_skbuff_dma,
983 				 lb_priv->dma_buf_sz, DMA_FROM_DEVICE);
984 		dev_kfree_skb_any(lb_priv->rx_skbuff);
985 	}
986 	lb_priv->rx_skbuff = NULL;
987 }
988 
989 static void dwmac_rk_dma_free_tx_skbufs(struct stmmac_priv *priv,
990 					struct dwmac_rk_lb_priv *lb_priv)
991 {
992 	if (lb_priv->tx_skbuff_dma) {
993 		dma_unmap_single(priv->device,
994 				 lb_priv->tx_skbuff_dma,
995 				 lb_priv->tx_skbuff_dma_len,
996 				 DMA_TO_DEVICE);
997 	}
998 
999 	if (lb_priv->tx_skbuff) {
1000 		dev_kfree_skb_any(lb_priv->tx_skbuff);
1001 		lb_priv->tx_skbuff = NULL;
1002 		lb_priv->tx_skbuff_dma = 0;
1003 	}
1004 }
1005 
1006 static int dwmac_rk_init_dma_desc_rings(struct net_device *dev, gfp_t flags,
1007 					struct dwmac_rk_lb_priv *lb_priv)
1008 {
1009 	struct stmmac_priv *priv = netdev_priv(dev);
1010 	struct dma_desc *p;
1011 
1012 	p = lb_priv->dma_tx;
1013 	p->des2 = 0;
1014 	lb_priv->tx_skbuff_dma = 0;
1015 	lb_priv->tx_skbuff_dma_len = 0;
1016 	lb_priv->tx_skbuff = NULL;
1017 
1018 	lb_priv->rx_skbuff = NULL;
1019 	stmmac_init_rx_desc(priv, lb_priv->dma_rx,
1020 				     priv->use_riwt, priv->mode,
1021 				     true, lb_priv->dma_buf_sz);
1022 
1023 	stmmac_init_tx_desc(priv, lb_priv->dma_tx,
1024 				     priv->mode,
1025 				     true);
1026 
1027 	return 0;
1028 }
1029 
1030 static int dwmac_rk_alloc_dma_desc_resources(struct stmmac_priv *priv,
1031 					     struct dwmac_rk_lb_priv *lb_priv)
1032 {
1033 	int ret = -ENOMEM;
1034 
1035 	/* desc dma map */
1036 	lb_priv->dma_rx = dma_alloc_coherent(priv->device,
1037 					     sizeof(struct dma_desc),
1038 					     &lb_priv->dma_rx_phy,
1039 					     GFP_KERNEL);
1040 	if (!lb_priv->dma_rx)
1041 		return ret;
1042 
1043 	lb_priv->dma_tx = dma_alloc_coherent(priv->device,
1044 					     sizeof(struct dma_desc),
1045 					     &lb_priv->dma_tx_phy,
1046 					     GFP_KERNEL);
1047 	if (!lb_priv->dma_tx) {
1048 		dma_free_coherent(priv->device,
1049 				  sizeof(struct dma_desc),
1050 				  lb_priv->dma_rx, lb_priv->dma_rx_phy);
1051 		return ret;
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 static void dwmac_rk_free_dma_desc_resources(struct stmmac_priv *priv,
1058 					     struct dwmac_rk_lb_priv *lb_priv)
1059 {
1060 	/* Release the DMA TX/RX socket buffers */
1061 	dwmac_rk_dma_free_rx_skbufs(priv, lb_priv);
1062 	dwmac_rk_dma_free_tx_skbufs(priv, lb_priv);
1063 
1064 	dma_free_coherent(priv->device, sizeof(struct dma_desc),
1065 			  lb_priv->dma_tx, lb_priv->dma_tx_phy);
1066 	dma_free_coherent(priv->device, sizeof(struct dma_desc),
1067 			  lb_priv->dma_rx, lb_priv->dma_rx_phy);
1068 }
1069 
1070 static int dwmac_rk_init_dma_engine(struct stmmac_priv *priv,
1071 				    struct dwmac_rk_lb_priv *lb_priv)
1072 {
1073 	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1074 	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
1075 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
1076 	u32 chan = 0;
1077 	int ret = 0;
1078 
1079 	ret = stmmac_reset(priv, priv->ioaddr);
1080 	if (ret) {
1081 		dev_err(priv->device, "Failed to reset the dma\n");
1082 		return ret;
1083 	}
1084 
1085 	/* DMA Configuration */
1086 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 0);
1087 
1088 	if (priv->plat->axi)
1089 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
1090 
1091 	for (chan = 0; chan < dma_csr_ch; chan++)
1092 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
1093 
1094 	/* DMA RX Channel Configuration */
1095 	for (chan = 0; chan < rx_channels_count; chan++) {
1096 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
1097 				    lb_priv->dma_rx_phy, chan);
1098 
1099 		lb_priv->rx_tail_addr = lb_priv->dma_rx_phy +
1100 			    (1 * sizeof(struct dma_desc));
1101 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
1102 				       lb_priv->rx_tail_addr, chan);
1103 	}
1104 
1105 	/* DMA TX Channel Configuration */
1106 	for (chan = 0; chan < tx_channels_count; chan++) {
1107 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
1108 				    lb_priv->dma_tx_phy, chan);
1109 
1110 		lb_priv->tx_tail_addr = lb_priv->dma_tx_phy;
1111 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
1112 				       lb_priv->tx_tail_addr, chan);
1113 	}
1114 
1115 	return ret;
1116 }
1117 
1118 static void dwmac_rk_dma_operation_mode(struct stmmac_priv *priv,
1119 					struct dwmac_rk_lb_priv *lb_priv)
1120 {
1121 	u32 rx_channels_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1122 	u32 tx_channels_count = min_t(u32, priv->plat->tx_queues_to_use, 1);
1123 	int rxfifosz = priv->plat->rx_fifo_size;
1124 	int txfifosz = priv->plat->tx_fifo_size;
1125 	u32 txmode = SF_DMA_MODE;
1126 	u32 rxmode = SF_DMA_MODE;
1127 	u32 chan = 0;
1128 	u8 qmode = 0;
1129 
1130 	if (rxfifosz == 0)
1131 		rxfifosz = priv->dma_cap.rx_fifo_size;
1132 	if (txfifosz == 0)
1133 		txfifosz = priv->dma_cap.tx_fifo_size;
1134 
1135 	/* Adjust for real per queue fifo size */
1136 	rxfifosz /= rx_channels_count;
1137 	txfifosz /= tx_channels_count;
1138 
1139 	/* configure all channels */
1140 	for (chan = 0; chan < rx_channels_count; chan++) {
1141 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1142 
1143 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1144 				   rxfifosz, qmode);
1145 		stmmac_set_dma_bfsize(priv, priv->ioaddr, lb_priv->dma_buf_sz,
1146 				      chan);
1147 	}
1148 
1149 	for (chan = 0; chan < tx_channels_count; chan++) {
1150 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1151 
1152 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1153 				   txfifosz, qmode);
1154 	}
1155 }
1156 
1157 static void dwmac_rk_rx_queue_dma_chan_map(struct stmmac_priv *priv)
1158 {
1159 	u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1160 	u32 queue;
1161 	u32 chan;
1162 
1163 	for (queue = 0; queue < rx_queues_count; queue++) {
1164 		chan = priv->plat->rx_queues_cfg[queue].chan;
1165 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
1166 	}
1167 }
1168 
1169 static void dwmac_rk_mac_enable_rx_queues(struct stmmac_priv *priv)
1170 {
1171 	u32 rx_queues_count = min_t(u32, priv->plat->rx_queues_to_use, 1);
1172 	int queue;
1173 	u8 mode;
1174 
1175 	for (queue = 0; queue < rx_queues_count; queue++) {
1176 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1177 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1178 	}
1179 }
1180 
1181 static void dwmac_rk_mtl_configuration(struct stmmac_priv *priv)
1182 {
1183 	/* Map RX MTL to DMA channels */
1184 	dwmac_rk_rx_queue_dma_chan_map(priv);
1185 
1186 	/* Enable MAC RX Queues */
1187 	dwmac_rk_mac_enable_rx_queues(priv);
1188 }
1189 
1190 static void dwmac_rk_mmc_setup(struct stmmac_priv *priv)
1191 {
1192 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1193 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1194 
1195 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
1196 
1197 	if (priv->dma_cap.rmon) {
1198 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
1199 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1200 	} else {
1201 		netdev_info(priv->dev, "No MAC Management Counters available\n");
1202 	}
1203 }
1204 
1205 static int dwmac_rk_init(struct net_device *dev,
1206 			 struct dwmac_rk_lb_priv *lb_priv)
1207 {
1208 	struct stmmac_priv *priv = netdev_priv(dev);
1209 	int ret;
1210 	u32 mode;
1211 
1212 	lb_priv->dma_buf_sz = 1536; /* mtu 1500 size */
1213 
1214 	if (priv->plat->has_gmac4)
1215 		lb_priv->buf_sz = priv->dma_cap.rx_fifo_size; /* rx fifo size */
1216 	else
1217 		lb_priv->buf_sz = 4096; /* rx fifo size */
1218 
1219 	ret = dwmac_rk_alloc_dma_desc_resources(priv, lb_priv);
1220 	if (ret < 0) {
1221 		pr_err("%s: DMA descriptors allocation failed\n", __func__);
1222 		return ret;
1223 	}
1224 
1225 	ret = dwmac_rk_init_dma_desc_rings(dev, GFP_KERNEL, lb_priv);
1226 	if (ret < 0) {
1227 		pr_err("%s: DMA descriptors initialization failed\n", __func__);
1228 		goto init_error;
1229 	}
1230 
1231 	/* DMA initialization and SW reset */
1232 	ret = dwmac_rk_init_dma_engine(priv, lb_priv);
1233 	if (ret < 0) {
1234 		pr_err("%s: DMA engine initialization failed\n", __func__);
1235 		goto init_error;
1236 	}
1237 
1238 	/* Copy the MAC addr into the HW */
1239 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1240 
1241 	/* Initialize the MAC Core */
1242 	stmmac_core_init(priv, priv->hw, dev);
1243 
1244 	dwmac_rk_mtl_configuration(priv);
1245 
1246 	dwmac_rk_mmc_setup(priv);
1247 
1248 	ret = priv->hw->mac->rx_ipc(priv->hw);
1249 	if (!ret) {
1250 		pr_warn(" RX IPC Checksum Offload disabled\n");
1251 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1252 		priv->hw->rx_csum = 0;
1253 	}
1254 
1255 	/* Set the HW DMA mode and the COE */
1256 	dwmac_rk_dma_operation_mode(priv, lb_priv);
1257 
1258 	if (priv->plat->has_gmac4) {
1259 		mode = readl(priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
1260 		/* Disable OSP to get best performance */
1261 		mode &= ~DMA_CONTROL_OSP;
1262 		writel(mode, priv->ioaddr + DMA_CHAN_TX_CONTROL(0));
1263 	} else {
1264 		/* Disable OSF */
1265 		mode = readl(priv->ioaddr + DMA_CONTROL);
1266 		writel((mode & ~DMA_CONTROL_OSF), priv->ioaddr + DMA_CONTROL);
1267 	}
1268 
1269 	stmmac_enable_dma_irq(priv, priv->ioaddr, 0, 1, 1);
1270 
1271 	if (priv->hw->pcs)
1272 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
1273 
1274 	return 0;
1275 init_error:
1276 	dwmac_rk_free_dma_desc_resources(priv, lb_priv);
1277 
1278 	return ret;
1279 }
1280 
1281 static void dwmac_rk_release(struct net_device *dev,
1282 			     struct dwmac_rk_lb_priv *lb_priv)
1283 {
1284 	struct stmmac_priv *priv = netdev_priv(dev);
1285 
1286 	stmmac_disable_dma_irq(priv, priv->ioaddr, 0, 0, 0);
1287 
1288 	/* Release and free the Rx/Tx resources */
1289 	dwmac_rk_free_dma_desc_resources(priv, lb_priv);
1290 }
1291 
1292 static int dwmac_rk_get_max_delayline(struct stmmac_priv *priv)
1293 {
1294 	if (of_device_is_compatible(priv->device->of_node,
1295 				    "rockchip,rk3588-gmac"))
1296 		return RK3588_MAX_DELAYLINE;
1297 	else
1298 		return MAX_DELAYLINE;
1299 }
1300 
1301 static int dwmac_rk_phy_poll_reset(struct stmmac_priv *priv, int addr)
1302 {
1303 	/* Poll until the reset bit clears (12 retries * 50ms == 0.6 sec) */
1304 	unsigned int val, retries = 12;
1305 	int ret;
1306 
1307 	val = mdiobus_read(priv->mii, addr, MII_BMCR);
1308 	mdiobus_write(priv->mii, addr, MII_BMCR, val | BMCR_RESET);
1309 
1310 	do {
1311 		msleep(50);
1312 		ret = mdiobus_read(priv->mii, addr, MII_BMCR);
1313 		if (ret < 0)
1314 			return ret;
1315 	} while (ret & BMCR_RESET && --retries);
1316 	if (ret & BMCR_RESET)
1317 		return -ETIMEDOUT;
1318 
1319 	msleep(1);
1320 	return 0;
1321 }
1322 
1323 static int dwmac_rk_loopback_run(struct stmmac_priv *priv,
1324 				 struct dwmac_rk_lb_priv *lb_priv)
1325 {
1326 	struct net_device *ndev = priv->dev;
1327 	int phy_iface = dwmac_rk_get_phy_interface(priv);
1328 	int ndev_up, phy_addr;
1329 	int ret = -EINVAL;
1330 
1331 	if (!ndev || !priv->mii || !ndev->phydev)
1332 		return -EINVAL;
1333 
1334 	phy_addr = priv->dev->phydev->mdio.addr;
1335 	lb_priv->max_delay = dwmac_rk_get_max_delayline(priv);
1336 
1337 	rtnl_lock();
1338 	/* check whether the netdevice is up */
1339 	ndev_up = ndev->flags & IFF_UP;
1340 
1341 	if (ndev_up) {
1342 		if (!netif_running(ndev) || !ndev->phydev) {
1343 			rtnl_unlock();
1344 			return -EINVAL;
1345 		}
1346 
1347 		/* check the negotiation status */
1348 		if (ndev->phydev->state != PHY_NOLINK &&
1349 		    ndev->phydev->state != PHY_RUNNING) {
1350 			rtnl_unlock();
1351 			pr_warn("Try again later, after negotiation done\n");
1352 			return -EAGAIN;
1353 		}
1354 
1355 		ndev->netdev_ops->ndo_stop(ndev);
1356 
1357 		if (priv->plat->stmmac_rst)
1358 			reset_control_assert(priv->plat->stmmac_rst);
1359 		dwmac_rk_phy_poll_reset(priv, phy_addr);
1360 		if (priv->plat->stmmac_rst)
1361 			reset_control_deassert(priv->plat->stmmac_rst);
1362 	}
1363 	/* wait for the PHY and controller to be ready */
1364 	usleep_range(100000, 200000);
1365 
1366 	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
1367 			      true, phy_addr, true);
1368 
1369 	ret = dwmac_rk_init(ndev, lb_priv);
1370 	if (ret)
1371 		goto exit_init;
1372 
1373 	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
1374 			      true, phy_addr, false);
1375 
1376 	if (lb_priv->scan) {
1377 		/* scan only support for rgmii mode */
1378 		if (phy_iface != PHY_INTERFACE_MODE_RGMII &&
1379 		    phy_iface != PHY_INTERFACE_MODE_RGMII_ID &&
1380 		    phy_iface != PHY_INTERFACE_MODE_RGMII_RXID &&
1381 		    phy_iface != PHY_INTERFACE_MODE_RGMII_TXID) {
1382 			ret = -EINVAL;
1383 			goto out;
1384 		}
1385 		ret = dwmac_rk_loopback_delayline_scan(priv, lb_priv);
1386 	} else {
1387 		lb_priv->id++;
1388 		lb_priv->tx = 0;
1389 		lb_priv->rx = 0;
1390 
1391 		lb_priv->packet = &dwmac_rk_tcp_attr;
1392 		ret = __dwmac_rk_loopback_run(priv, lb_priv);
1393 	}
1394 
1395 out:
1396 	dwmac_rk_release(ndev, lb_priv);
1397 	dwmac_rk_set_loopback(priv, lb_priv->type, lb_priv->speed,
1398 			      false, phy_addr, false);
1399 exit_init:
1400 	if (ndev_up)
1401 		ndev->netdev_ops->ndo_open(ndev);
1402 
1403 	rtnl_unlock();
1404 
1405 	return ret;
1406 }
1407 
1408 static ssize_t rgmii_delayline_show(struct device *dev,
1409 				    struct device_attribute *attr,
1410 				    char *buf)
1411 {
1412 	struct net_device *ndev = dev_get_drvdata(dev);
1413 	struct stmmac_priv *priv = netdev_priv(ndev);
1414 	int tx, rx;
1415 
1416 	dwmac_rk_get_rgmii_delayline(priv, &tx, &rx);
1417 
1418 	return sprintf(buf, "tx delayline: 0x%x, rx delayline: 0x%x\n",
1419 		       tx, rx);
1420 }
1421 
1422 static ssize_t rgmii_delayline_store(struct device *dev,
1423 				     struct device_attribute *attr,
1424 				     const char *buf, size_t count)
1425 {
1426 	struct net_device *ndev = dev_get_drvdata(dev);
1427 	struct stmmac_priv *priv = netdev_priv(ndev);
1428 	int tx = 0, rx = 0;
1429 	char tmp[32];
1430 	size_t buf_size = min(count, (sizeof(tmp) - 1));
1431 	char *data;
1432 
1433 	memset(tmp, 0, sizeof(tmp));
1434 	strncpy(tmp, buf, buf_size);
1435 
1436 	data = tmp;
1437 	data = strstr(data, " ");
1438 	if (!data)
1439 		goto out;
1440 	*data = 0;
1441 	data++;
1442 
1443 	if (kstrtoint(tmp, 0, &tx) || tx > dwmac_rk_get_max_delayline(priv))
1444 		goto out;
1445 
1446 	if (kstrtoint(data, 0, &rx) || rx > dwmac_rk_get_max_delayline(priv))
1447 		goto out;
1448 
1449 	dwmac_rk_set_rgmii_delayline(priv, tx, rx);
1450 	pr_info("Set rgmii delayline tx: 0x%x, rx: 0x%x\n", tx, rx);
1451 
1452 	return count;
1453 out:
1454 	pr_err("wrong delayline value input, range is <0x0, 0x7f>\n");
1455 	pr_err("usage: <tx_delayline> <rx_delayline>\n");
1456 
1457 	return count;
1458 }
1459 static DEVICE_ATTR_RW(rgmii_delayline);
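
/*
 * Example usage from user space (the sysfs path depends on the platform
 * device and is shown here only as a placeholder):
 *   cat /sys/devices/platform/<gmac>/rgmii_delayline
 *   echo "0x30 0x10" > /sys/devices/platform/<gmac>/rgmii_delayline
 * The store format is "<tx_delayline> <rx_delayline>"; values are parsed
 * with kstrtoint(), so decimal or 0x-prefixed hex both work.
 */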
1460 
1461 static ssize_t mac_lb_store(struct device *dev,
1462 			    struct device_attribute *attr,
1463 			    const char *buf, size_t count)
1464 {
1465 	struct net_device *ndev = dev_get_drvdata(dev);
1466 	struct stmmac_priv *priv = netdev_priv(ndev);
1467 	struct dwmac_rk_lb_priv *lb_priv;
1468 	int ret, speed;
1469 
1470 	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1471 	if (!lb_priv)
1472 		return -ENOMEM;
1473 
1474 	ret = kstrtoint(buf, 0, &speed);
1475 	if (ret) {
1476 		kfree(lb_priv);
1477 		return count;
1478 	}
1479 	pr_info("MAC loopback speed set to %d\n", speed);
1480 
1481 	lb_priv->sysfs = 1;
1482 	lb_priv->type = LOOPBACK_TYPE_GMAC;
1483 	lb_priv->speed = speed;
1484 	lb_priv->scan = 0;
1485 
1486 	ret = dwmac_rk_loopback_run(priv, lb_priv);
1487 	kfree(lb_priv);
1488 
1489 	if (!ret)
1490 		pr_info("MAC loopback: PASS\n");
1491 	else
1492 		pr_info("MAC loopback: FAIL\n");
1493 
1494 	return count;
1495 }
1496 static DEVICE_ATTR_WO(mac_lb);
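
/*
 * Example usage (placeholder path as above): run an internal MAC loopback
 * test at a given speed, e.g.
 *   echo 1000 > /sys/devices/platform/<gmac>/mac_lb
 * Accepted speeds are 10, 100 and 1000; the PASS/FAIL verdict is printed
 * to the kernel log.
 */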
1497 
1498 static ssize_t phy_lb_store(struct device *dev,
1499 			    struct device_attribute *attr,
1500 			    const char *buf, size_t count)
1501 {
1502 	struct net_device *ndev = dev_get_drvdata(dev);
1503 	struct stmmac_priv *priv = netdev_priv(ndev);
1504 	struct dwmac_rk_lb_priv *lb_priv;
1505 	int ret, speed;
1506 
1507 	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1508 	if (!lb_priv)
1509 		return -ENOMEM;
1510 
1511 	ret = kstrtoint(buf, 0, &speed);
1512 	if (ret) {
1513 		kfree(lb_priv);
1514 		return count;
1515 	}
1516 	pr_info("PHY loopback speed set to %d\n", speed);
1517 
1518 	lb_priv->sysfs = 1;
1519 	lb_priv->type = LOOPBACK_TYPE_PHY;
1520 	lb_priv->speed = speed;
1521 	lb_priv->scan = 0;
1522 
1523 	ret = dwmac_rk_loopback_run(priv, lb_priv);
1524 	if (!ret)
1525 		pr_info("PHY loopback: PASS\n");
1526 	else
1527 		pr_info("PHY loopback: FAIL\n");
1528 
1529 	kfree(lb_priv);
1530 	return count;
1531 }
1532 static DEVICE_ATTR_WO(phy_lb);
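
/*
 * Example usage (placeholder path as above): same as mac_lb, but the frame
 * is looped back inside the PHY, e.g.
 *   echo 100 > /sys/devices/platform/<gmac>/phy_lb
 */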
1533 
1534 static ssize_t phy_lb_scan_store(struct device *dev,
1535 				 struct device_attribute *attr,
1536 				 const char *buf, size_t count)
1537 {
1538 	struct net_device *ndev = dev_get_drvdata(dev);
1539 	struct stmmac_priv *priv = netdev_priv(ndev);
1540 	struct dwmac_rk_lb_priv *lb_priv;
1541 	int ret, speed;
1542 
1543 	lb_priv = kzalloc(sizeof(*lb_priv), GFP_KERNEL);
1544 	if (!lb_priv)
1545 		return -ENOMEM;
1546 
1547 	ret = kstrtoint(buf, 0, &speed);
1548 	if (ret) {
1549 		kfree(lb_priv);
1550 		return count;
1551 	}
1552 	pr_info("Delayline scan speed set to %d\n", speed);
1553 
1554 	lb_priv->sysfs = 1;
1555 	lb_priv->type = LOOPBACK_TYPE_PHY;
1556 	lb_priv->speed = speed;
1557 	lb_priv->scan = 1;
1558 
1559 	dwmac_rk_loopback_run(priv, lb_priv);
1560 
1561 	kfree(lb_priv);
1562 	return count;
1563 }
1564 static DEVICE_ATTR_WO(phy_lb_scan);
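
/*
 * Example usage (placeholder path as above): run the exhaustive RGMII
 * delayline scan through PHY loopback at the given speed, e.g.
 *   echo 1000 > /sys/devices/platform/<gmac>/phy_lb_scan
 * The pass/fail map and the suggested tx/rx delayline are printed to the
 * kernel log and can then be applied via rgmii_delayline.
 */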
1565 
1566 int dwmac_rk_create_loopback_sysfs(struct device *device)
1567 {
1568 	int ret;
1569 
1570 	ret = device_create_file(device, &dev_attr_rgmii_delayline);
1571 	if (ret)
1572 		return ret;
1573 
1574 	ret = device_create_file(device, &dev_attr_mac_lb);
1575 	if (ret)
1576 		goto remove_rgmii_delayline;
1577 
1578 	ret = device_create_file(device, &dev_attr_phy_lb);
1579 	if (ret)
1580 		goto remove_mac_lb;
1581 
1582 	ret = device_create_file(device, &dev_attr_phy_lb_scan);
1583 	if (ret)
1584 		goto remove_phy_lb;
1585 
1586 	return 0;
1587 
1588 remove_phy_lb:
1589 	device_remove_file(device, &dev_attr_phy_lb);
1590 
1591 remove_mac_lb:
1592 	device_remove_file(device, &dev_attr_mac_lb);
1593 
1594 remove_rgmii_delayline:
1595 	device_remove_file(device, &dev_attr_rgmii_delayline);
1596 
1597 	return ret;
1598 }
1599 
1600 int dwmac_rk_remove_loopback_sysfs(struct device *device)
1601 {
1602 	device_remove_file(device, &dev_attr_rgmii_delayline);
1603 	device_remove_file(device, &dev_attr_mac_lb);
1604 	device_remove_file(device, &dev_attr_phy_lb);
1605 	device_remove_file(device, &dev_attr_phy_lb_scan);
1606 
1607 	return 0;
1608 }
1609