xref: /rk3399_rockchip-uboot/drivers/net/ldpaa_eth/ldpaa_eth.c (revision c919ab9ee5811844d0ebefcfd0bba903d4089a85)
1 /*
2  * Copyright (C) 2014 Freescale Semiconductor
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <asm/io.h>
9 #include <asm/types.h>
10 #include <malloc.h>
11 #include <net.h>
12 #include <hwconfig.h>
13 #include <phy.h>
14 #include <linux/compat.h>
15 #include <fsl-mc/fsl_dpmac.h>
16 
17 #include "ldpaa_eth.h"
18 
19 #undef CONFIG_PHYLIB
/*
 * Stub PHY initialization for the interface.
 *
 * External PHY handling is not implemented yet (see TODO below); link
 * parameters are instead hard-coded in ldpaa_eth_open() when
 * CONFIG_PHYLIB is not set.  Always returns 0 (success).
 */
static int init_phy(struct eth_device *dev)
{
	/*TODO for external PHY */

	return 0;
}
26 
27 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
28 			 const struct dpaa_fd *fd)
29 {
30 	u64 fd_addr;
31 	uint16_t fd_offset;
32 	uint32_t fd_length;
33 	struct ldpaa_fas *fas;
34 	uint32_t status, err;
35 	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
36 	u32 time_start;
37 	struct qbman_release_desc releasedesc;
38 	struct qbman_swp *swp = dflt_dpio->sw_portal;
39 
40 	fd_addr = ldpaa_fd_get_addr(fd);
41 	fd_offset = ldpaa_fd_get_offset(fd);
42 	fd_length = ldpaa_fd_get_len(fd);
43 
44 	debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
45 
46 	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
47 		/* Read the frame annotation status word and check for errors */
48 		fas = (struct ldpaa_fas *)
49 				((uint8_t *)(fd_addr) +
50 				dflt_dpni->buf_layout.private_data_size);
51 		status = le32_to_cpu(fas->status);
52 		if (status & LDPAA_ETH_RX_ERR_MASK) {
53 			printf("Rx frame error(s): 0x%08x\n",
54 			       status & LDPAA_ETH_RX_ERR_MASK);
55 			goto error;
56 		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
57 			printf("Unsupported feature in bitmask: 0x%08x\n",
58 			       status & LDPAA_ETH_RX_UNSUPP_MASK);
59 			goto error;
60 		}
61 	}
62 
63 	debug("Rx frame: To Upper layer\n");
64 	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
65 				    fd_length);
66 
67 error:
68 	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
69 	qbman_release_desc_clear(&releasedesc);
70 	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
71 	time_start = get_timer(0);
72 	do {
73 		/* Release buffer into the QBMAN */
74 		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
75 	} while (get_timer(time_start) < timeo && err == -EBUSY);
76 
77 	if (err == -EBUSY)
78 		printf("Rx frame: QBMAN buffer release fails\n");
79 
80 	return;
81 }
82 
/*
 * eth recv callback: pull one frame from the default Rx frame queue.
 *
 * Issues a software-portal pull for a single frame, polls the DQRR for
 * up to ~2ms, and retries the whole pull up to 4 times.  A valid frame
 * is handed to ldpaa_eth_rx(); invalid dequeue entries are consumed and
 * the pull is retried.
 *
 * Returns 0 on success, a negative qbman error from the pull, or
 * -ENODATA when no DQRR entry arrived before the timeout.
 */
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
	u32 time_start;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	/* --i pre-decrement: at most 4 pull attempts */
	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}

		/* Poll the DQRR for the pulled entry until timeout */
		time_start = get_timer(0);

		 do {
			dq = qbman_swp_dqrr_next(swp);
		} while (get_timer(time_start) < timeo && !dq);

		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				/* Entry carried no frame: consume and retry */
				qbman_swp_dqrr_consume(swp, dq);
				continue;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		} else {
			/* Timed out with no DQRR entry: give up this call */
			err = -ENODATA;
			debug("No DQRR entries\n");
			break;
		}
	}

	return err;
}
141 
/*
 * eth send callback: transmit one frame.
 *
 * Acquires a buffer from the default DPBP, copies the payload in at the
 * Tx data offset, builds a frame descriptor and enqueues it through the
 * interface's queuing destination (QDID).  On enqueue failure the
 * acquired buffer is released back to the pool so it is not leaked.
 *
 * @net_dev: device being transmitted on
 * @buf:     payload to send
 * @len:     payload length in bytes
 *
 * Returns 0 on success, -ENOMEM if no buffer could be acquired, or a
 * negative qbman error from the enqueue/release.
 */
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	/* NOTE(review): no timeout here — spins while the portal is busy */
	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	/* Push the payload out of the cache before HW reads it */
	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	/* Pass-through annotation: ASA lowest size, PTA valid */
	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	/* Retry the enqueue while the portal is busy, up to ~10ms */
	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	/* Enqueue failed: hand the acquired buffer back to the pool */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release fails\n");

	return err;
}
219 
/*
 * eth init callback: bring the DPAA2 interface up.
 *
 * Sequence: create the DPMAC, connect it to the DPNI through the DPRC,
 * open/configure the DPNI, set up and seed the DPBP buffer pool, bind
 * the pool to the DPNI, program the MAC address, enable the DPNI, set
 * the MAC link state, and finally fetch the Rx FQID and Tx QDID used by
 * the datapath.
 *
 * Returns 0 when the link is up, a negative error otherwise.
 *
 * NOTE(review): the failure paths after ldpaa_dpni_bind() succeeds
 * (dpni_add_mac_addr, phy_startup, dpni_enable, dpmac_set_link_state)
 * return directly instead of jumping to the cleanup labels, leaving the
 * DPMAC/DPNI/DPBP configured — verify whether teardown is intended.
 */
static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpni_queue_attr rx_queue_attr;
	struct dpmac_link_state	dpmac_link_state = { 0 };
	int err;

	/* Already running: nothing to do */
	if (net_dev->state == ETH_STATE_ACTIVE)
		return 0;

	/* The Management Complex must be booted before any DP object use */
	if (get_mc_boot_status() != 0) {
		printf("ERROR (MC is not booted)\n");
		return -ENODEV;
	}

	/* A deployed DPL owns the objects; U-Boot cannot use them then */
	if (get_dpl_apply_status() == 0) {
		printf("ERROR (DPL is deployed. No device available)\n");
		return -ENODEV;
	}
	/* DPMAC initialization */
	err = ldpaa_dpmac_setup(priv);
	if (err < 0)
		goto err_dpmac_setup;

	/* DPMAC binding DPNI */
	err = ldpaa_dpmac_bind(priv);
	if (err)
		goto err_dpamc_bind;

	/* DPNI initialization */
	err = ldpaa_dpni_setup(priv);
	if (err < 0)
		goto err_dpni_setup;

	err = ldpaa_dpbp_setup();
	if (err < 0)
		goto err_dpbp_setup;

	/* DPNI binding DPBP */
	err = ldpaa_dpni_bind(priv);
	if (err)
		goto err_dpni_bind;

	err = dpni_add_mac_addr(dflt_mc_io, MC_CMD_NO_FLAGS,
				dflt_dpni->dpni_handle, net_dev->enetaddr);
	if (err) {
		printf("dpni_add_mac_addr() failed\n");
		return err;
	}

#ifdef CONFIG_PHYLIB
	/* TODO Check this path */
	err = phy_startup(priv->phydev);
	if (err) {
		printf("%s: Could not initialize\n", priv->phydev->dev->name);
		return err;
	}
#else
	/* No PHY support: assume a fixed 1Gbps full-duplex link */
	priv->phydev->speed = SPEED_1000;
	priv->phydev->link = 1;
	priv->phydev->duplex = DUPLEX_FULL;
#endif

	err = dpni_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
	if (err < 0) {
		printf("dpni_enable() failed\n");
		return err;
	}

	/* Report the (fixed) link parameters to the MAC */
	dpmac_link_state.rate = SPEED_1000;
	dpmac_link_state.options = DPMAC_LINK_OPT_AUTONEG;
	dpmac_link_state.up = 1;
	err = dpmac_set_link_state(dflt_mc_io, MC_CMD_NO_FLAGS,
				  priv->dpmac_handle, &dpmac_link_state);
	if (err < 0) {
		printf("dpmac_set_link_state() failed\n");
		return err;
	}
	/* TODO: support multiple Rx flows */
	err = dpni_get_rx_flow(dflt_mc_io, MC_CMD_NO_FLAGS,
			       dflt_dpni->dpni_handle, 0, 0, &rx_queue_attr);
	if (err) {
		printf("dpni_get_rx_flow() failed\n");
		goto err_rx_flow;
	}

	/* Frame queue the Rx poll path dequeues from */
	priv->rx_dflt_fqid = rx_queue_attr.fqid;

	err = dpni_get_qdid(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle,
			    &priv->tx_qdid);
	if (err) {
		printf("dpni_get_qdid() failed\n");
		goto err_qdid;
	}

	if (!priv->phydev->link)
		printf("%s: No link.\n", priv->phydev->dev->name);

	return priv->phydev->link ? 0 : -1;

err_qdid:
err_rx_flow:
	dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
err_dpni_bind:
	ldpaa_dpbp_free();
err_dpbp_setup:
err_dpamc_bind:
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
err_dpni_setup:
err_dpmac_setup:
	return err;
}
332 
/*
 * eth halt callback: tear the interface down.
 *
 * Disconnects and destroys the DPMAC, disables and resets the DPNI,
 * drains/frees the DPBP buffers and closes the DPNI handle.  Each MC
 * call's failure is reported but teardown continues best-effort.
 */
static void ldpaa_eth_stop(struct eth_device *net_dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	int err = 0;

	/* Never brought up: nothing to undo */
	if ((net_dev->state == ETH_STATE_PASSIVE) ||
	    (net_dev->state == ETH_STATE_INIT))
		return;

	err = dprc_disconnect(dflt_mc_io, MC_CMD_NO_FLAGS,
			      dflt_dprc_handle, &dpmac_endpoint);
	if (err < 0)
		printf("dprc_disconnect() failed dpmac_endpoint\n");

	err = dpmac_destroy(dflt_mc_io, MC_CMD_NO_FLAGS, priv->dpmac_handle);
	if (err < 0)
		printf("dpmac_destroy() failed\n");

	/* Stop Tx and Rx traffic */
	err = dpni_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
	if (err < 0)
		printf("dpni_disable() failed\n");

#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif

	/* Return all pool buffers to the heap, then reset/close the DPNI */
	ldpaa_dpbp_free();
	dpni_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
}
364 
365 static void ldpaa_dpbp_drain_cnt(int count)
366 {
367 	uint64_t buf_array[7];
368 	void *addr;
369 	int ret, i;
370 
371 	BUG_ON(count > 7);
372 
373 	do {
374 		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
375 					dflt_dpbp->dpbp_attr.bpid,
376 					buf_array, count);
377 		if (ret < 0) {
378 			printf("qbman_swp_acquire() failed\n");
379 			return;
380 		}
381 		for (i = 0; i < ret; i++) {
382 			addr = (void *)buf_array[i];
383 			debug("Free: buffer addr =0x%p\n", addr);
384 			free(addr);
385 		}
386 	} while (ret);
387 }
388 
389 static void ldpaa_dpbp_drain(void)
390 {
391 	int i;
392 	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
393 		ldpaa_dpbp_drain_cnt(7);
394 }
395 
/*
 * Allocate up to 7 Rx buffers and release them into buffer pool @bpid.
 *
 * Each buffer is cache-line aligned, zeroed and flushed so the hardware
 * sees consistent contents.  On partial allocation failure, whatever was
 * allocated so far is still released to the pool.
 *
 * Returns the number of buffers actually seeded (0..7).
 */
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		/* Ensure the zeroed contents reach memory before HW use */
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr =0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */

	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	/* i buffers were handed to the pool */
	return i;

err_alloc:
	/* Seed the buffers that did get allocated before the failure */
	if (i)
		goto release_bufs;

	return 0;
}
437 
438 static int ldpaa_dpbp_seed(uint16_t bpid)
439 {
440 	int i;
441 	int count;
442 
443 	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
444 		count = ldpaa_bp_add_7(bpid);
445 		if (count < 7)
446 			printf("Buffer Seed= %d\n", count);
447 	}
448 
449 	return 0;
450 }
451 
/*
 * Open, enable and seed the default DPBP buffer pool.
 *
 * On any failure the already-acquired MC state is unwound through the
 * goto ladder (disable, then close).  Returns 0 on success or the MC
 * error code.
 */
static int ldpaa_dpbp_setup(void)
{
	int err;

	err = dpbp_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_attr.id,
			&dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_enable() failed\n");
		goto err_enable;
	}

	/* Refresh attributes (notably the bpid used by the datapath) */
	err = dpbp_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
				  dflt_dpbp->dpbp_handle,
				  &dflt_dpbp->dpbp_attr);
	if (err) {
		printf("dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
	if (err) {
		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
		goto err_seed;
	}

	return 0;

err_seed:
err_get_attr:
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_enable:
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
err_open:
	return err;
}
494 
/*
 * Release the default DPBP: drain all seeded buffers back to the heap,
 * then disable, reset and close the pool object.  Order matters — the
 * pool must still be usable while draining.
 */
static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpbp->dpbp_handle);
}
502 
503 static int ldpaa_dpmac_setup(struct ldpaa_eth_priv *priv)
504 {
505 	int err = 0;
506 	struct dpmac_cfg dpmac_cfg;
507 
508 	dpmac_cfg.mac_id = priv->dpmac_id;
509 	err = dpmac_create(dflt_mc_io, MC_CMD_NO_FLAGS, &dpmac_cfg,
510 			  &priv->dpmac_handle);
511 	if (err)
512 		printf("dpmac_create() failed\n");
513 	return err;
514 }
515 
516 static int ldpaa_dpmac_bind(struct ldpaa_eth_priv *priv)
517 {
518 	int err = 0;
519 	struct dprc_connection_cfg dprc_connection_cfg = {
520 		/* If both rates are zero the connection */
521 		/* will be configured in "best effort" mode. */
522 		.committed_rate = 0,
523 		.max_rate = 0
524 	};
525 
526 	memset(&dpmac_endpoint, 0, sizeof(struct dprc_endpoint));
527 	sprintf(dpmac_endpoint.type, "dpmac");
528 	dpmac_endpoint.id = priv->dpmac_id;
529 
530 	memset(&dpni_endpoint, 0, sizeof(struct dprc_endpoint));
531 	sprintf(dpni_endpoint.type, "dpni");
532 	dpni_endpoint.id = dflt_dpni->dpni_id;
533 
534 	err = dprc_connect(dflt_mc_io, MC_CMD_NO_FLAGS,
535 			     dflt_dprc_handle,
536 			     &dpmac_endpoint,
537 			     &dpni_endpoint,
538 			     &dprc_connection_cfg);
539 	return err;
540 }
541 
542 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
543 {
544 	int err;
545 
546 	/* and get a handle for the DPNI this interface is associate with */
547 	err = dpni_open(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_id,
548 			&dflt_dpni->dpni_handle);
549 	if (err) {
550 		printf("dpni_open() failed\n");
551 		goto err_open;
552 	}
553 
554 	err = dpni_get_attributes(dflt_mc_io, MC_CMD_NO_FLAGS,
555 				  dflt_dpni->dpni_handle,
556 				  &dflt_dpni->dpni_attrs);
557 	if (err) {
558 		printf("dpni_get_attributes() failed (err=%d)\n", err);
559 		goto err_get_attr;
560 	}
561 
562 	/* Configure our buffers' layout */
563 	dflt_dpni->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
564 				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
565 				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
566 	dflt_dpni->buf_layout.pass_parser_result = true;
567 	dflt_dpni->buf_layout.pass_frame_status = true;
568 	dflt_dpni->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
569 	/* ...rx, ... */
570 	err = dpni_set_rx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
571 					dflt_dpni->dpni_handle,
572 					&dflt_dpni->buf_layout);
573 	if (err) {
574 		printf("dpni_set_rx_buffer_layout() failed");
575 		goto err_buf_layout;
576 	}
577 
578 	/* ... tx, ... */
579 	dflt_dpni->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
580 	err = dpni_set_tx_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
581 					dflt_dpni->dpni_handle,
582 					&dflt_dpni->buf_layout);
583 	if (err) {
584 		printf("dpni_set_tx_buffer_layout() failed");
585 		goto err_buf_layout;
586 	}
587 
588 	/* ... tx-confirm. */
589 	dflt_dpni->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
590 	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, MC_CMD_NO_FLAGS,
591 					     dflt_dpni->dpni_handle,
592 					     &dflt_dpni->buf_layout);
593 	if (err) {
594 		printf("dpni_set_tx_conf_buffer_layout() failed");
595 		goto err_buf_layout;
596 	}
597 
598 	/* Now that we've set our tx buffer layout, retrieve the minimum
599 	 * required tx data offset.
600 	 */
601 	err = dpni_get_tx_data_offset(dflt_mc_io, MC_CMD_NO_FLAGS,
602 				      dflt_dpni->dpni_handle,
603 				      &priv->tx_data_offset);
604 	if (err) {
605 		printf("dpni_get_tx_data_offset() failed\n");
606 		goto err_data_offset;
607 	}
608 
609 	/* Warn in case TX data offset is not multiple of 64 bytes. */
610 	WARN_ON(priv->tx_data_offset % 64);
611 
612 	/* Accomodate SWA space. */
613 	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
614 	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
615 
616 	return 0;
617 
618 err_data_offset:
619 err_buf_layout:
620 err_get_attr:
621 	dpni_close(dflt_mc_io, MC_CMD_NO_FLAGS, dflt_dpni->dpni_handle);
622 err_open:
623 	return err;
624 }
625 
626 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
627 {
628 	struct dpni_pools_cfg pools_params;
629 	struct dpni_tx_flow_cfg dflt_tx_flow;
630 	int err = 0;
631 
632 	pools_params.num_dpbp = 1;
633 	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
634 	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
635 	err = dpni_set_pools(dflt_mc_io, MC_CMD_NO_FLAGS,
636 			     dflt_dpni->dpni_handle, &pools_params);
637 	if (err) {
638 		printf("dpni_set_pools() failed\n");
639 		return err;
640 	}
641 
642 	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
643 	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
644 
645 	dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
646 	dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
647 	dflt_tx_flow.conf_err_cfg.errors_only = 1;
648 	err = dpni_set_tx_flow(dflt_mc_io, MC_CMD_NO_FLAGS,
649 			       dflt_dpni->dpni_handle, &priv->tx_flow_id,
650 			       &dflt_tx_flow);
651 	if (err) {
652 		printf("dpni_set_tx_flow() failed\n");
653 		return err;
654 	}
655 
656 	return 0;
657 }
658 
659 static int ldpaa_eth_netdev_init(struct eth_device *net_dev,
660 				 phy_interface_t enet_if)
661 {
662 	int err;
663 	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
664 
665 	sprintf(net_dev->name, "DPMAC%d@%s", priv->dpmac_id,
666 		phy_interface_strings[enet_if]);
667 
668 	net_dev->iobase = 0;
669 	net_dev->init = ldpaa_eth_open;
670 	net_dev->halt = ldpaa_eth_stop;
671 	net_dev->send = ldpaa_eth_tx;
672 	net_dev->recv = ldpaa_eth_pull_dequeue_rx;
673 /*
674 	TODO: PHY MDIO information
675 	priv->bus = info->bus;
676 	priv->phyaddr = info->phy_addr;
677 	priv->enet_if = info->enet_if;
678 */
679 
680 	if (init_phy(net_dev))
681 		return 0;
682 
683 	err = eth_register(net_dev);
684 	if (err < 0) {
685 		printf("eth_register() = %d\n", err);
686 		return err;
687 	}
688 
689 	return 0;
690 }
691 
692 int ldpaa_eth_init(int dpmac_id, phy_interface_t enet_if)
693 {
694 	struct eth_device		*net_dev = NULL;
695 	struct ldpaa_eth_priv		*priv = NULL;
696 	int				err = 0;
697 
698 
699 	/* Net device */
700 	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
701 	if (!net_dev) {
702 		printf("eth_device malloc() failed\n");
703 		return -ENOMEM;
704 	}
705 	memset(net_dev, 0, sizeof(struct eth_device));
706 
707 	/* alloc the ldpaa ethernet private struct */
708 	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
709 	if (!priv) {
710 		printf("ldpaa_eth_priv malloc() failed\n");
711 		return -ENOMEM;
712 	}
713 	memset(priv, 0, sizeof(struct ldpaa_eth_priv));
714 
715 	net_dev->priv = (void *)priv;
716 	priv->net_dev = (struct eth_device *)net_dev;
717 	priv->dpmac_id = dpmac_id;
718 	debug("%s dpmac_id=%d\n", __func__, dpmac_id);
719 
720 	err = ldpaa_eth_netdev_init(net_dev, enet_if);
721 	if (err)
722 		goto err_netdev_init;
723 
724 	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
725 	return 0;
726 
727 err_netdev_init:
728 	free(priv);
729 	net_dev->priv = NULL;
730 	free(net_dev);
731 
732 	return err;
733 }
734