xref: /rk3399_rockchip-uboot/drivers/net/ldpaa_eth/ldpaa_eth.c (revision 0c7c87a4ac3bb532f785fb6a19f68dcc2a588d1c)
1 /*
2  * Copyright (C) 2014 Freescale Semiconductor
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <asm/io.h>
9 #include <asm/types.h>
10 #include <malloc.h>
11 #include <net.h>
12 #include <hwconfig.h>
13 #include <phy.h>
14 #include <linux/compat.h>
15 
16 #include "ldpaa_eth.h"
17 
18 #undef CONFIG_PHYLIB
/*
 * Initialize the PHY for this interface.
 *
 * Currently a stub: external-PHY probing is not implemented (CONFIG_PHYLIB
 * is force-undefined above), so this unconditionally reports success.
 */
static int init_phy(struct eth_device *dev)
{
	/*TODO for external PHY */

	return 0;
}
25 
26 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
27 			 const struct dpaa_fd *fd)
28 {
29 	u64 fd_addr;
30 	uint16_t fd_offset;
31 	uint32_t fd_length;
32 	struct ldpaa_fas *fas;
33 	uint32_t status, err;
34 	struct qbman_release_desc releasedesc;
35 	struct qbman_swp *swp = dflt_dpio->sw_portal;
36 
37 	fd_addr = ldpaa_fd_get_addr(fd);
38 	fd_offset = ldpaa_fd_get_offset(fd);
39 	fd_length = ldpaa_fd_get_len(fd);
40 
41 	debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
42 
43 	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
44 		/* Read the frame annotation status word and check for errors */
45 		fas = (struct ldpaa_fas *)
46 				((uint8_t *)(fd_addr) +
47 				priv->buf_layout.private_data_size);
48 		status = le32_to_cpu(fas->status);
49 		if (status & LDPAA_ETH_RX_ERR_MASK) {
50 			printf("Rx frame error(s): 0x%08x\n",
51 			       status & LDPAA_ETH_RX_ERR_MASK);
52 			goto error;
53 		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
54 			printf("Unsupported feature in bitmask: 0x%08x\n",
55 			       status & LDPAA_ETH_RX_UNSUPP_MASK);
56 			goto error;
57 		}
58 	}
59 
60 	debug("Rx frame: To Upper layer\n");
61 	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
62 				    fd_length);
63 
64 error:
65 	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
66 	qbman_release_desc_clear(&releasedesc);
67 	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
68 	do {
69 		/* Release buffer into the QBMAN */
70 		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
71 	} while (err == -EBUSY);
72 	return;
73 }
74 
/*
 * Poll the default Rx frame queue once: issue a volatile dequeue command
 * through the software portal and busy-wait (up to ~2ms) for a DQRR entry.
 * A valid frame is handed to ldpaa_eth_rx(); the DQRR entry is consumed
 * in every case.  Up to 4 pull attempts are made (i counts down from 5).
 *
 * Returns 0 on success, a negative portal error, or -ENODATA when no
 * dequeue response arrived in time.
 */
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		/* Prepare a pull command for one frame from the default FQ */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}

		time_start = get_timer(0);

		/* Busy-wait for the dequeue response to land in the DQRR */
		 do {
			dq = qbman_swp_dqrr_next(swp);
		} while (get_timer(time_start) < timeo && !dq);

		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				continue;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		} else {
			err = -ENODATA;
			debug("No DQRR entries\n");
			break;
		}
	}

	return err;
}
133 
/*
 * Transmit one frame.  Acquires a buffer from the default DPBP pool,
 * copies the payload in at the DPNI's Tx data offset, builds a frame
 * descriptor and enqueues it via the Tx queuing destination with a
 * ~10ms busy-retry.  On enqueue failure the acquired buffer is
 * released back into the pool.
 *
 * Returns the last qbman enqueue/release code (0 on success) or
 * -ENOMEM when no pool buffer could be acquired.
 */
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	/* Borrow an Rx-sized buffer from the pool to carry the Tx frame */
	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	/* Make the payload visible to the hardware before enqueueing */
	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	/* Enqueue to the Tx queuing destination, no order restoration */
	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	/* Retry while the portal is busy, bounded by the 10ms timeout */
	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	/* Enqueue failed: hand the acquired buffer back to the pool */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release fails\n");

	return err;
}
211 
212 static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
213 {
214 	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
215 	struct dpni_queue_attr rx_queue_attr;
216 	uint8_t mac_addr[6];
217 	int err;
218 
219 	if (net_dev->state == ETH_STATE_ACTIVE)
220 		return 0;
221 
222 	/* DPNI initialization */
223 	err = ldpaa_dpni_setup(priv);
224 	if (err < 0)
225 		goto err_dpni_setup;
226 
227 	err = ldpaa_dpbp_setup();
228 	if (err < 0)
229 		goto err_dpbp_setup;
230 
231 	/* DPNI binding DPBP */
232 	err = ldpaa_dpni_bind(priv);
233 	if (err)
234 		goto err_bind;
235 
236 	err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
237 					mac_addr);
238 	if (err) {
239 		printf("dpni_get_primary_mac_addr() failed\n");
240 		return err;
241 	}
242 
243 	memcpy(net_dev->enetaddr, mac_addr, 0x6);
244 
245 	/* setup the MAC address */
246 	if (net_dev->enetaddr[0] & 0x01) {
247 		printf("%s: MacAddress is multcast address\n",	__func__);
248 		return 1;
249 	}
250 
251 #ifdef CONFIG_PHYLIB
252 	/* TODO Check this path */
253 	err = phy_startup(priv->phydev);
254 	if (err) {
255 		printf("%s: Could not initialize\n", priv->phydev->dev->name);
256 		return err;
257 	}
258 #else
259 	priv->phydev->speed = SPEED_1000;
260 	priv->phydev->link = 1;
261 	priv->phydev->duplex = DUPLEX_FULL;
262 #endif
263 
264 	err = dpni_enable(dflt_mc_io, priv->dpni_handle);
265 	if (err < 0) {
266 		printf("dpni_enable() failed\n");
267 		return err;
268 	}
269 
270 	/* TODO: support multiple Rx flows */
271 	err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
272 			       &rx_queue_attr);
273 	if (err) {
274 		printf("dpni_get_rx_flow() failed\n");
275 		goto err_rx_flow;
276 	}
277 
278 	priv->rx_dflt_fqid = rx_queue_attr.fqid;
279 
280 	err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
281 	if (err) {
282 		printf("dpni_get_qdid() failed\n");
283 		goto err_qdid;
284 	}
285 
286 	if (!priv->phydev->link)
287 		printf("%s: No link.\n", priv->phydev->dev->name);
288 
289 	return priv->phydev->link ? 0 : -1;
290 
291 err_qdid:
292 err_rx_flow:
293 	dpni_disable(dflt_mc_io, priv->dpni_handle);
294 err_bind:
295 	ldpaa_dpbp_free();
296 err_dpbp_setup:
297 	dpni_close(dflt_mc_io, priv->dpni_handle);
298 err_dpni_setup:
299 	return err;
300 }
301 
302 static void ldpaa_eth_stop(struct eth_device *net_dev)
303 {
304 	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
305 	int err = 0;
306 
307 	if ((net_dev->state == ETH_STATE_PASSIVE) ||
308 	    (net_dev->state == ETH_STATE_INIT))
309 		return;
310 	/* Stop Tx and Rx traffic */
311 	err = dpni_disable(dflt_mc_io, priv->dpni_handle);
312 	if (err < 0)
313 		printf("dpni_disable() failed\n");
314 
315 #ifdef CONFIG_PHYLIB
316 	phy_shutdown(priv->phydev);
317 #endif
318 
319 	ldpaa_dpbp_free();
320 	dpni_reset(dflt_mc_io, priv->dpni_handle);
321 	dpni_close(dflt_mc_io, priv->dpni_handle);
322 }
323 
324 static void ldpaa_dpbp_drain_cnt(int count)
325 {
326 	uint64_t buf_array[7];
327 	void *addr;
328 	int ret, i;
329 
330 	BUG_ON(count > 7);
331 
332 	do {
333 		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
334 					dflt_dpbp->dpbp_attr.bpid,
335 					buf_array, count);
336 		if (ret < 0) {
337 			printf("qbman_swp_acquire() failed\n");
338 			return;
339 		}
340 		for (i = 0; i < ret; i++) {
341 			addr = (void *)buf_array[i];
342 			debug("Free: buffer addr =0x%p\n", addr);
343 			free(addr);
344 		}
345 	} while (ret);
346 }
347 
348 static void ldpaa_dpbp_drain(void)
349 {
350 	int i;
351 	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
352 		ldpaa_dpbp_drain_cnt(7);
353 }
354 
/*
 * Seed up to 7 Rx buffers into buffer pool 'bpid'.  Each buffer is
 * cache-line aligned, zeroed and flushed to memory before its address
 * is handed to QBMAN.  If an allocation fails part-way through, the
 * buffers allocated so far are still released into the pool.
 *
 * Returns the number of buffers actually released (0..7).
 */
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		/* Zero and flush so the hardware sees a clean buffer */
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr =0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */

	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	/* Release whatever was successfully allocated before the failure */
	if (i)
		goto release_bufs;

	return 0;
}
396 
397 static int ldpaa_dpbp_seed(uint16_t bpid)
398 {
399 	int i;
400 	int count;
401 
402 	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
403 		count = ldpaa_bp_add_7(bpid);
404 		if (count < 7)
405 			printf("Buffer Seed= %d\n", count);
406 	}
407 
408 	return 0;
409 }
410 
411 static int ldpaa_dpbp_setup(void)
412 {
413 	int err;
414 
415 	err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
416 			&dflt_dpbp->dpbp_handle);
417 	if (err) {
418 		printf("dpbp_open() failed\n");
419 		goto err_open;
420 	}
421 
422 	err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
423 	if (err) {
424 		printf("dpbp_enable() failed\n");
425 		goto err_enable;
426 	}
427 
428 	err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
429 				  &dflt_dpbp->dpbp_attr);
430 	if (err) {
431 		printf("dpbp_get_attributes() failed\n");
432 		goto err_get_attr;
433 	}
434 
435 	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
436 	if (err) {
437 		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
438 		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
439 		goto err_seed;
440 	}
441 
442 	return 0;
443 
444 err_seed:
445 err_get_attr:
446 	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
447 err_enable:
448 	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
449 err_open:
450 	return err;
451 }
452 
/*
 * Tear down the default DPBP: drain all seeded buffers back out of the
 * pool (freeing their memory), then disable, reset and close the DPBP
 * object.  Order matters: the pool must be empty before it is disabled.
 */
static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
}
460 
461 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
462 {
463 	int err;
464 
465 	/* and get a handle for the DPNI this interface is associate with */
466 	err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
467 	if (err) {
468 		printf("dpni_open() failed\n");
469 		goto err_open;
470 	}
471 
472 	err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
473 				  &priv->dpni_attrs);
474 	if (err) {
475 		printf("dpni_get_attributes() failed (err=%d)\n", err);
476 		goto err_get_attr;
477 	}
478 
479 	/* Configure our buffers' layout */
480 	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
481 				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
482 				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
483 	priv->buf_layout.pass_parser_result = true;
484 	priv->buf_layout.pass_frame_status = true;
485 	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
486 	/* ...rx, ... */
487 	err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
488 					&priv->buf_layout);
489 	if (err) {
490 		printf("dpni_set_rx_buffer_layout() failed");
491 		goto err_buf_layout;
492 	}
493 
494 	/* ... tx, ... */
495 	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
496 	err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
497 					&priv->buf_layout);
498 	if (err) {
499 		printf("dpni_set_tx_buffer_layout() failed");
500 		goto err_buf_layout;
501 	}
502 
503 	/* ... tx-confirm. */
504 	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
505 	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
506 					     &priv->buf_layout);
507 	if (err) {
508 		printf("dpni_set_tx_conf_buffer_layout() failed");
509 		goto err_buf_layout;
510 	}
511 
512 	/* Now that we've set our tx buffer layout, retrieve the minimum
513 	 * required tx data offset.
514 	 */
515 	err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
516 				      &priv->tx_data_offset);
517 	if (err) {
518 		printf("dpni_get_tx_data_offset() failed\n");
519 		goto err_data_offset;
520 	}
521 
522 	/* Warn in case TX data offset is not multiple of 64 bytes. */
523 	WARN_ON(priv->tx_data_offset % 64);
524 
525 	/* Accomodate SWA space. */
526 	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
527 	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
528 
529 	return 0;
530 
531 err_data_offset:
532 err_buf_layout:
533 err_get_attr:
534 	dpni_close(dflt_mc_io, priv->dpni_handle);
535 err_open:
536 	return err;
537 }
538 
539 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
540 {
541 	struct dpni_pools_cfg pools_params;
542 	struct dpni_tx_flow_cfg dflt_tx_flow;
543 	int err = 0;
544 
545 	pools_params.num_dpbp = 1;
546 	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
547 	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
548 	err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
549 	if (err) {
550 		printf("dpni_set_pools() failed\n");
551 		return err;
552 	}
553 
554 	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
555 	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
556 
557 	dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
558 	dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
559 	dflt_tx_flow.conf_err_cfg.errors_only = 1;
560 	err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
561 			       &priv->tx_flow_id, &dflt_tx_flow);
562 	if (err) {
563 		printf("dpni_set_tx_flow() failed\n");
564 		return err;
565 	}
566 
567 	return 0;
568 }
569 
570 static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
571 {
572 	int err;
573 	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
574 
575 	sprintf(net_dev->name, "DPNI%d", priv->dpni_id);
576 
577 	net_dev->iobase = 0;
578 	net_dev->init = ldpaa_eth_open;
579 	net_dev->halt = ldpaa_eth_stop;
580 	net_dev->send = ldpaa_eth_tx;
581 	net_dev->recv = ldpaa_eth_pull_dequeue_rx;
582 /*
583 	TODO: PHY MDIO information
584 	priv->bus = info->bus;
585 	priv->phyaddr = info->phy_addr;
586 	priv->enet_if = info->enet_if;
587 */
588 
589 	if (init_phy(net_dev))
590 		return 0;
591 
592 	err = eth_register(net_dev);
593 	if (err < 0) {
594 		printf("eth_register() = %d\n", err);
595 		return err;
596 	}
597 
598 	return 0;
599 }
600 
601 int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
602 {
603 	struct eth_device		*net_dev = NULL;
604 	struct ldpaa_eth_priv		*priv = NULL;
605 	int				err = 0;
606 
607 
608 	/* Net device */
609 	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
610 	if (!net_dev) {
611 		printf("eth_device malloc() failed\n");
612 		return -ENOMEM;
613 	}
614 	memset(net_dev, 0, sizeof(struct eth_device));
615 
616 	/* alloc the ldpaa ethernet private struct */
617 	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
618 	if (!priv) {
619 		printf("ldpaa_eth_priv malloc() failed\n");
620 		return -ENOMEM;
621 	}
622 	memset(priv, 0, sizeof(struct ldpaa_eth_priv));
623 
624 	net_dev->priv = (void *)priv;
625 	priv->net_dev = (struct eth_device *)net_dev;
626 	priv->dpni_id = obj_desc.id;
627 
628 	err = ldpaa_eth_netdev_init(net_dev);
629 	if (err)
630 		goto err_netdev_init;
631 
632 	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
633 	return 0;
634 
635 err_netdev_init:
636 	free(priv);
637 	net_dev->priv = NULL;
638 	free(net_dev);
639 
640 	return err;
641 }
642