xref: /rk3399_rockchip-uboot/drivers/net/dwc_eth_qos.c (revision 7a00f0a44b0ec80f61ab1cd47fc45a04869bbfa3)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION.
3  *
4  * SPDX-License-Identifier: GPL-2.0
5  *
6  * Portions based on U-Boot's rtl8169.c.
7  */
8 
9 /*
10  * This driver supports the Synopsys DesignWare Ethernet QOS (Quality Of
11  * Service) IP block. The IP supports multiple options for bus type, clocking/
12  * reset structure, and feature list.
13  *
14  * The driver is written such that generic core logic is kept separate from
15  * configuration-specific logic. Code that interacts with configuration-
16  * specific resources is split out into separate functions to avoid polluting
17  * common code. If/when this driver is enhanced to support multiple
18  * configurations, the core code should be adapted to call all configuration-
19  * specific functions through function pointers, with the definition of those
20  * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
21  * field.
22  *
23  * The following configurations are currently supported:
24  * tegra186:
25  *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
26  *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
27  *    supports a single RGMII PHY. This configuration also has SW control over
28  *    all clock and reset signals to the HW block.
29  */
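
/*
 * Illustrative sketch of the .data dispatch pattern described above, as
 * used by the eqos->config->ops calls later in this file (hedged: the
 * compatible string and config name follow upstream conventions but are
 * assumptions here):
 *
 *	static const struct udevice_id eqos_ids[] = {
 *		{
 *			.compatible = "nvidia,tegra186-eqos",
 *			.data = (ulong)&eqos_tegra186_config,
 *		},
 *		{ }
 *	};
 *
 * dev_get_driver_data() returns .data at probe time, and the core code
 * then reaches every configuration-specific function through the
 * eqos->config->ops function pointers.
 */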
30 #include <common.h>
31 #include <clk.h>
32 #include <dm.h>
33 #include <errno.h>
34 #include <memalign.h>
35 #include <miiphy.h>
36 #include <net.h>
37 #include <netdev.h>
38 #include <phy.h>
39 #include <reset.h>
40 #include <wait_bit.h>
41 #include <asm/io.h>
42 #include <eth_phy.h>
43 #ifdef CONFIG_ARCH_IMX8M
44 #include <asm/arch/clock.h>
45 #include <asm/mach-imx/sys_proto.h>
46 #endif
47 #include "dwc_eth_qos.h"
48 
49 /* Core registers */
50 
51 #define EQOS_MAC_REGS_BASE 0x000
52 struct eqos_mac_regs {
53 	uint32_t configuration;				/* 0x000 */
54 	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
55 	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
56 	uint32_t unused_074[(0x090 - 0x074) / 4];	/* 0x074 */
57 	uint32_t rx_flow_ctrl;				/* 0x090 */
58 	uint32_t unused_094;				/* 0x094 */
59 	uint32_t txq_prty_map0;				/* 0x098 */
60 	uint32_t unused_09c;				/* 0x09c */
61 	uint32_t rxq_ctrl0;				/* 0x0a0 */
62 	uint32_t unused_0a4;				/* 0x0a4 */
63 	uint32_t rxq_ctrl2;				/* 0x0a8 */
64 	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
65 	uint32_t us_tic_counter;			/* 0x0dc */
66 	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
67 	uint32_t hw_feature0;				/* 0x11c */
68 	uint32_t hw_feature1;				/* 0x120 */
69 	uint32_t hw_feature2;				/* 0x124 */
70 	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
71 	uint32_t mdio_address;				/* 0x200 */
72 	uint32_t mdio_data;				/* 0x204 */
73 	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
74 	uint32_t address0_high;				/* 0x300 */
75 	uint32_t address0_low;				/* 0x304 */
76 };
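
/*
 * A minimal compile-time sanity check (illustrative addition, not in the
 * original driver; assumes the toolchain accepts C11 _Static_assert and
 * that offsetof() is available via <common.h>): the unused_* pad arrays
 * above are sized so each register lands at the offset noted in its
 * trailing comment, e.g.:
 */
_Static_assert(offsetof(struct eqos_mac_regs, mdio_address) == 0x200,
	       "eqos_mac_regs layout mismatch");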
77 
78 #define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
79 #define EQOS_MAC_CONFIGURATION_CST			BIT(21)
80 #define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
81 #define EQOS_MAC_CONFIGURATION_WD			BIT(19)
82 #define EQOS_MAC_CONFIGURATION_JD			BIT(17)
83 #define EQOS_MAC_CONFIGURATION_JE			BIT(16)
84 #define EQOS_MAC_CONFIGURATION_PS			BIT(15)
85 #define EQOS_MAC_CONFIGURATION_FES			BIT(14)
86 #define EQOS_MAC_CONFIGURATION_DM			BIT(13)
87 #define EQOS_MAC_CONFIGURATION_LM			BIT(12)
88 #define EQOS_MAC_CONFIGURATION_TE			BIT(1)
89 #define EQOS_MAC_CONFIGURATION_RE			BIT(0)
90 
91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)
94 
95 #define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)
96 
97 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff
99 
100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
102 
103 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
104 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff
105 
106 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
107 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
108 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
109 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0
110 
111 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
112 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
113 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
114 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f
115 
116 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
117 #define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3
118 
119 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
120 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
121 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
122 #define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
123 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
124 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
125 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
126 #define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
127 #define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)
128 
129 #define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff
130 
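/*
 * A minimal sketch (illustrative, not used by the driver) showing how the
 * fields above compose one Clause-22 MDIO read command word; the real
 * driver builds the same value inline in eqos_mdio_read() below.
 * "phy_addr", "reg_addr" and "csr_clk_range" are hypothetical names.
 */
static inline u32 eqos_mdio_read_cmd(u32 phy_addr, u32 reg_addr,
				     u32 csr_clk_range)
{
	return (phy_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (reg_addr << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (csr_clk_range << EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;	/* GB ("busy") starts the op */
}
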
131 #define EQOS_MTL_REGS_BASE 0xd00
132 struct eqos_mtl_regs {
133 	uint32_t txq0_operation_mode;			/* 0xd00 */
134 	uint32_t unused_d04;				/* 0xd04 */
135 	uint32_t txq0_debug;				/* 0xd08 */
136 	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
137 	uint32_t txq0_quantum_weight;			/* 0xd18 */
138 	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
139 	uint32_t rxq0_operation_mode;			/* 0xd30 */
140 	uint32_t unused_d34;				/* 0xd34 */
141 	uint32_t rxq0_debug;				/* 0xd38 */
142 };
143 
144 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
145 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
146 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
147 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
148 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
150 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)
151 
152 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
153 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
154 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3
155 
156 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
157 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
158 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
159 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
160 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
162 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
164 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
165 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)
166 
167 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
168 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
169 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
170 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3
171 
172 #define EQOS_DMA_REGS_BASE 0x1000
173 struct eqos_dma_regs {
174 	uint32_t mode;					/* 0x1000 */
175 	uint32_t sysbus_mode;				/* 0x1004 */
176 	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
177 	uint32_t ch0_control;				/* 0x1100 */
178 	uint32_t ch0_tx_control;			/* 0x1104 */
179 	uint32_t ch0_rx_control;			/* 0x1108 */
180 	uint32_t unused_110c;				/* 0x110c */
181 	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
182 	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
183 	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
184 	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
185 	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
186 	uint32_t unused_1124;				/* 0x1124 */
187 	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
188 	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
189 	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
190 };
191 
192 #define EQOS_DMA_MODE_SWR				BIT(0)
193 
194 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
195 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
196 #define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
197 #define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
198 #define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
199 #define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)
200 
201 #define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)
202 
203 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
204 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
205 #define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
206 #define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)
207 
208 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
209 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
210 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
211 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
212 #define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)
213 
214 /* These registers are Tegra186-specific */
215 #define EQOS_TEGRA186_REGS_BASE 0x8800
216 struct eqos_tegra186_regs {
217 	uint32_t sdmemcomppadctrl;			/* 0x8800 */
218 	uint32_t auto_cal_config;			/* 0x8804 */
219 	uint32_t unused_8808;				/* 0x8808 */
220 	uint32_t auto_cal_status;			/* 0x880c */
221 };
222 
223 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)
224 
225 #define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
226 #define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)
227 
228 #define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)
229 
230 /* Descriptors */
231 
232 #define EQOS_DESCRIPTOR_WORDS	4
233 #define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
234 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
235 #define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
236 #define EQOS_DESCRIPTORS_TX	4
237 #define EQOS_DESCRIPTORS_RX	4
238 #define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
239 #define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
240 				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
241 #define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
242 #define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
243 #define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)
244 
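/*
 * Worked example (assuming ARCH_DMA_MINALIGN == 64, a common ARMv8
 * cache-line size; other alignments scale these values accordingly):
 *   EQOS_DESCRIPTOR_SIZE   = 4 words * 4 bytes   = 16 bytes
 *   EQOS_DESCRIPTORS_SIZE  = ALIGN(8 * 16, 64)   = 128 bytes
 *   EQOS_MAX_PACKET_SIZE   = ALIGN(1568, 64)     = 1600 bytes
 *   EQOS_RX_BUFFER_SIZE    = 4 * 1600            = 6400 bytes
 */
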
245 /*
246  * Warn if the cache-line size is larger than the descriptor size. In such
247  * cases the driver will likely fail because the CPU needs to flush the cache
248  * when requeuing RX buffers, so descriptors written by the hardware may
249  * be discarded. Architectures with full IO coherence, such as x86, do not
250  * experience this issue, and hence are excluded from this condition.
251  *
252  * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
253  * the driver to allocate descriptors from a pool of non-cached memory.
254  */
255 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
256 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
257 	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
258 #warning Cache line size is larger than descriptor size
259 #endif
260 #endif
261 
262 struct eqos_desc {
263 	u32 des0;
264 	u32 des1;
265 	u32 des2;
266 	u32 des3;
267 };
268 
269 #define EQOS_DESC3_OWN		BIT(31)
270 #define EQOS_DESC3_FD		BIT(29)
271 #define EQOS_DESC3_LD		BIT(28)
272 #define EQOS_DESC3_BUF1V	BIT(24)
273 
274 /*
275  * TX and RX descriptors are 16 bytes. This causes problems with the cache
276  * maintenance on CPUs where the cache-line size exceeds the size of these
277  * descriptors. What will happen is that when the driver receives a packet
278  * it will be immediately requeued for the hardware to reuse. The CPU will
279  * therefore need to flush the cache-line containing the descriptor, which
280  * will cause all other descriptors in the same cache-line to be flushed
281  * along with it. If one of those descriptors had been written to by the
282  * device those changes (and the associated packet) will be lost.
283  *
284  * To work around this, we make use of non-cached memory if available. If
285  * descriptors are mapped uncached there's no need to manually flush them
286  * or invalidate them.
287  *
288  * Note that this only applies to descriptors. The packet data buffers do
289  * not have the same constraints since they are EQOS_MAX_PACKET_SIZE bytes
290  * long, so they are unlikely to share cache-lines.
291  */
292 static void *eqos_alloc_descs(unsigned int num)
293 {
294 #ifdef CONFIG_SYS_NONCACHED_MEMORY
295 	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
296 				      EQOS_DESCRIPTOR_ALIGN);
297 #else
298 	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
299 #endif
300 }
301 
302 static void eqos_free_descs(void *descs)
303 {
304 #ifdef CONFIG_SYS_NONCACHED_MEMORY
305 	/* FIXME: noncached_alloc() has no opposite */
306 #else
307 	free(descs);
308 #endif
309 }
310 
311 static void eqos_inval_desc_tegra186(void *desc)
312 {
313 #ifndef CONFIG_SYS_NONCACHED_MEMORY
314 	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
315 	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
316 				  ARCH_DMA_MINALIGN);
317 
318 	invalidate_dcache_range(start, end);
319 #endif
320 }
321 
322 static void eqos_inval_desc_generic(void *desc)
323 {
324 #ifndef CONFIG_SYS_NONCACHED_MEMORY
325 	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
326 	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
327 				    ARCH_DMA_MINALIGN);
328 
329 	invalidate_dcache_range(start, end);
330 #endif
331 }
332 
333 static void eqos_flush_desc_tegra186(void *desc)
334 {
335 #ifndef CONFIG_SYS_NONCACHED_MEMORY
336 	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
337 #endif
338 }
339 
340 static void eqos_flush_desc_generic(void *desc)
341 {
342 #ifndef CONFIG_SYS_NONCACHED_MEMORY
343 	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
344 	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
345 				    ARCH_DMA_MINALIGN);
346 
347 	flush_dcache_range(start, end);
348 #endif
349 }
350 
351 static void eqos_inval_buffer_tegra186(void *buf, size_t size)
352 {
353 	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
354 	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);
355 
356 	invalidate_dcache_range(start, end);
357 }
358 
359 static void eqos_inval_buffer_generic(void *buf, size_t size)
360 {
361 	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
362 	unsigned long end = roundup((unsigned long)buf + size,
363 				    ARCH_DMA_MINALIGN);
364 
365 	invalidate_dcache_range(start, end);
366 }
367 
368 static void eqos_flush_buffer_tegra186(void *buf, size_t size)
369 {
370 	flush_cache((unsigned long)buf, size);
371 }
372 
373 static void eqos_flush_buffer_generic(void *buf, size_t size)
374 {
375 	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
376 	unsigned long end = roundup((unsigned long)buf + size,
377 				    ARCH_DMA_MINALIGN);
378 
379 	flush_dcache_range(start, end);
380 }
381 
382 static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
383 {
384 	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
385 				 EQOS_MAC_MDIO_ADDRESS_GB, false,
386 				 1000000, true);
387 }
388 
389 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
390 			  int mdio_reg)
391 {
392 	struct eqos_priv *eqos = bus->priv;
393 	u32 val;
394 	int ret;
395 
396 	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
397 	      mdio_reg);
398 
399 	ret = eqos_mdio_wait_idle(eqos);
400 	if (ret) {
401 		pr_err("MDIO not idle at entry");
402 		return ret;
403 	}
404 
405 	val = readl(&eqos->mac_regs->mdio_address);
406 	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
407 		EQOS_MAC_MDIO_ADDRESS_C45E;
408 	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
409 		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
410 		(eqos->config->config_mac_mdio <<
411 		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
412 		(EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
413 		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
414 		EQOS_MAC_MDIO_ADDRESS_GB;
415 	writel(val, &eqos->mac_regs->mdio_address);
416 
417 	udelay(eqos->config->mdio_wait);
418 
419 	ret = eqos_mdio_wait_idle(eqos);
420 	if (ret) {
421 		pr_err("MDIO read didn't complete");
422 		return ret;
423 	}
424 
425 	val = readl(&eqos->mac_regs->mdio_data);
426 	val &= EQOS_MAC_MDIO_DATA_GD_MASK;
427 
428 	debug("%s: val=%x\n", __func__, val);
429 
430 	return val;
431 }
432 
433 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
434 			   int mdio_reg, u16 mdio_val)
435 {
436 	struct eqos_priv *eqos = bus->priv;
437 	u32 val;
438 	int ret;
439 
440 	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
441 	      mdio_addr, mdio_reg, mdio_val);
442 
443 	ret = eqos_mdio_wait_idle(eqos);
444 	if (ret) {
445 		pr_err("MDIO not idle at entry");
446 		return ret;
447 	}
448 
449 	writel(mdio_val, &eqos->mac_regs->mdio_data);
450 
451 	val = readl(&eqos->mac_regs->mdio_address);
452 	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
453 		EQOS_MAC_MDIO_ADDRESS_C45E;
454 	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
455 		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
456 		(eqos->config->config_mac_mdio <<
457 		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
458 		(EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
459 		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
460 		EQOS_MAC_MDIO_ADDRESS_GB;
461 	writel(val, &eqos->mac_regs->mdio_address);
462 
463 	udelay(eqos->config->mdio_wait);
464 
465 	ret = eqos_mdio_wait_idle(eqos);
466 	if (ret) {
467 		pr_err("MDIO write didn't complete");
468 		return ret;
469 	}
470 
471 	return 0;
472 }
473 
474 static int eqos_start_clks_tegra186(struct udevice *dev)
475 {
476 #ifdef CONFIG_CLK
477 	struct eqos_priv *eqos = dev_get_priv(dev);
478 	int ret;
479 
480 	debug("%s(dev=%p):\n", __func__, dev);
481 
482 	ret = clk_enable(&eqos->clk_slave_bus);
483 	if (ret < 0) {
484 		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
485 		goto err;
486 	}
487 
488 	ret = clk_enable(&eqos->clk_master_bus);
489 	if (ret < 0) {
490 		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
491 		goto err_disable_clk_slave_bus;
492 	}
493 
494 	ret = clk_enable(&eqos->clk_rx);
495 	if (ret < 0) {
496 		pr_err("clk_enable(clk_rx) failed: %d", ret);
497 		goto err_disable_clk_master_bus;
498 	}
499 
500 	ret = clk_enable(&eqos->clk_ptp_ref);
501 	if (ret < 0) {
502 		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
503 		goto err_disable_clk_rx;
504 	}
505 
506 	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
507 	if (ret < 0) {
508 		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
509 		goto err_disable_clk_ptp_ref;
510 	}
511 
512 	ret = clk_enable(&eqos->clk_tx);
513 	if (ret < 0) {
514 		pr_err("clk_enable(clk_tx) failed: %d", ret);
515 		goto err_disable_clk_ptp_ref;
516 	}
517 #endif
518 
519 	debug("%s: OK\n", __func__);
520 	return 0;
521 
522 #ifdef CONFIG_CLK
523 err_disable_clk_ptp_ref:
524 	clk_disable(&eqos->clk_ptp_ref);
525 err_disable_clk_rx:
526 	clk_disable(&eqos->clk_rx);
527 err_disable_clk_master_bus:
528 	clk_disable(&eqos->clk_master_bus);
529 err_disable_clk_slave_bus:
530 	clk_disable(&eqos->clk_slave_bus);
531 err:
532 	debug("%s: FAILED: %d\n", __func__, ret);
533 	return ret;
534 #endif
535 }
536 
537 static int eqos_start_clks_stm32(struct udevice *dev)
538 {
539 #ifdef CONFIG_CLK
540 	struct eqos_priv *eqos = dev_get_priv(dev);
541 	int ret;
542 
543 	debug("%s(dev=%p):\n", __func__, dev);
544 
545 	ret = clk_enable(&eqos->clk_master_bus);
546 	if (ret < 0) {
547 		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
548 		goto err;
549 	}
550 
551 	if (clk_valid(&eqos->clk_rx)) {
552 		ret = clk_enable(&eqos->clk_rx);
553 		if (ret < 0) {
554 			pr_err("clk_enable(clk_rx) failed: %d", ret);
555 			goto err_disable_clk_master_bus;
556 		}
557 	}
558 
559 	if (clk_valid(&eqos->clk_tx)) {
560 		ret = clk_enable(&eqos->clk_tx);
561 		if (ret < 0) {
562 			pr_err("clk_enable(clk_tx) failed: %d", ret);
563 			goto err_disable_clk_rx;
564 		}
565 	}
566 
567 	if (clk_valid(&eqos->clk_ck)) {
568 		ret = clk_enable(&eqos->clk_ck);
569 		if (ret < 0) {
570 			pr_err("clk_enable(clk_ck) failed: %d", ret);
571 			goto err_disable_clk_tx;
572 		}
573 	}
574 #endif
575 
576 	debug("%s: OK\n", __func__);
577 	return 0;
578 
579 #ifdef CONFIG_CLK
580 err_disable_clk_tx:
581 	if (clk_valid(&eqos->clk_tx))
582 		clk_disable(&eqos->clk_tx);
583 err_disable_clk_rx:
584 	if (clk_valid(&eqos->clk_rx))
585 		clk_disable(&eqos->clk_rx);
586 err_disable_clk_master_bus:
587 	clk_disable(&eqos->clk_master_bus);
588 err:
589 	debug("%s: FAILED: %d\n", __func__, ret);
590 	return ret;
591 #endif
592 }
593 
594 static int eqos_start_clks_imx(struct udevice *dev)
595 {
596 	return 0;
597 }
598 
599 static void eqos_stop_clks_tegra186(struct udevice *dev)
600 {
601 #ifdef CONFIG_CLK
602 	struct eqos_priv *eqos = dev_get_priv(dev);
603 
604 	debug("%s(dev=%p):\n", __func__, dev);
605 
606 	clk_disable(&eqos->clk_tx);
607 	clk_disable(&eqos->clk_ptp_ref);
608 	clk_disable(&eqos->clk_rx);
609 	clk_disable(&eqos->clk_master_bus);
610 	clk_disable(&eqos->clk_slave_bus);
611 #endif
612 
613 	debug("%s: OK\n", __func__);
614 }
615 
616 static void eqos_stop_clks_stm32(struct udevice *dev)
617 {
618 #ifdef CONFIG_CLK
619 	struct eqos_priv *eqos = dev_get_priv(dev);
620 
621 	debug("%s(dev=%p):\n", __func__, dev);
622 
623 	if (clk_valid(&eqos->clk_tx))
624 		clk_disable(&eqos->clk_tx);
625 	if (clk_valid(&eqos->clk_rx))
626 		clk_disable(&eqos->clk_rx);
627 	clk_disable(&eqos->clk_master_bus);
628 	if (clk_valid(&eqos->clk_ck))
629 		clk_disable(&eqos->clk_ck);
630 #endif
631 
632 	debug("%s: OK\n", __func__);
633 }
634 
635 static void eqos_stop_clks_imx(struct udevice *dev)
636 {
637 	/* empty */
638 }
639 
640 static int eqos_start_resets_tegra186(struct udevice *dev)
641 {
642 	struct eqos_priv *eqos = dev_get_priv(dev);
643 	int ret;
644 
645 	debug("%s(dev=%p):\n", __func__, dev);
646 
647 	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
648 	if (ret < 0) {
649 		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
650 		return ret;
651 	}
652 
653 	udelay(2);
654 
655 	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
656 	if (ret < 0) {
657 		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
658 		return ret;
659 	}
660 
661 	ret = reset_assert(&eqos->reset_ctl);
662 	if (ret < 0) {
663 		pr_err("reset_assert() failed: %d", ret);
664 		return ret;
665 	}
666 
667 	udelay(2);
668 
669 	ret = reset_deassert(&eqos->reset_ctl);
670 	if (ret < 0) {
671 		pr_err("reset_deassert() failed: %d", ret);
672 		return ret;
673 	}
674 
675 	debug("%s: OK\n", __func__);
676 	return 0;
677 }
678 
679 static int eqos_start_resets_stm32(struct udevice *dev)
680 {
681 	struct eqos_priv *eqos = dev_get_priv(dev);
682 	int ret;
683 
684 	debug("%s(dev=%p):\n", __func__, dev);
685 	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
686 		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
687 		if (ret < 0) {
688 			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
689 			       ret);
690 			return ret;
691 		}
692 
693 		udelay(eqos->reset_delays[0]);
694 
695 		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
696 		if (ret < 0) {
697 			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
698 			       ret);
699 			return ret;
700 		}
701 
702 		udelay(eqos->reset_delays[1]);
703 
704 		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
705 		if (ret < 0) {
706 			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
707 			       ret);
708 			return ret;
709 		}
710 
711 		udelay(eqos->reset_delays[2]);
712 	}
713 	debug("%s: OK\n", __func__);
714 
715 	return 0;
716 }
717 
718 static int eqos_start_resets_imx(struct udevice *dev)
719 {
720 	return 0;
721 }
722 
723 static int eqos_stop_resets_tegra186(struct udevice *dev)
724 {
725 	struct eqos_priv *eqos = dev_get_priv(dev);
726 
727 	reset_assert(&eqos->reset_ctl);
728 	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
729 
730 	return 0;
731 }
732 
733 static int eqos_stop_resets_stm32(struct udevice *dev)
734 {
735 	struct eqos_priv *eqos = dev_get_priv(dev);
736 	int ret;
737 
738 	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
739 		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
740 		if (ret < 0) {
741 			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
742 			       ret);
743 			return ret;
744 		}
745 	}
746 
747 	return 0;
748 }
749 
750 static int eqos_stop_resets_imx(struct udevice *dev)
751 {
752 	return 0;
753 }
754 
755 static int eqos_calibrate_pads_tegra186(struct udevice *dev)
756 {
757 	struct eqos_priv *eqos = dev_get_priv(dev);
758 	int ret;
759 
760 	debug("%s(dev=%p):\n", __func__, dev);
761 
762 	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
763 		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);
764 
765 	udelay(1);
766 
767 	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
768 		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);
769 
770 	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
771 				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
772 	if (ret) {
773 		pr_err("calibrate didn't start");
774 		goto failed;
775 	}
776 
777 	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
778 				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
779 	if (ret) {
780 		pr_err("calibrate didn't finish");
781 		goto failed;
782 	}
783 
784 	ret = 0;
785 
786 failed:
787 	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
788 		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);
789 
790 	debug("%s: returns %d\n", __func__, ret);
791 
792 	return ret;
793 }
794 
795 static int eqos_disable_calibration_tegra186(struct udevice *dev)
796 {
797 	struct eqos_priv *eqos = dev_get_priv(dev);
798 
799 	debug("%s(dev=%p):\n", __func__, dev);
800 
801 	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
802 		     EQOS_AUTO_CAL_CONFIG_ENABLE);
803 
804 	return 0;
805 }
806 
807 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
808 {
809 #ifdef CONFIG_CLK
810 	struct eqos_priv *eqos = dev_get_priv(dev);
811 
812 	return clk_get_rate(&eqos->clk_slave_bus);
813 #else
814 	return 0;
815 #endif
816 }
817 
818 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
819 {
820 #ifdef CONFIG_CLK
821 	struct eqos_priv *eqos = dev_get_priv(dev);
822 
823 	return clk_get_rate(&eqos->clk_master_bus);
824 #else
825 	return 0;
826 #endif
827 }
828 
829 __weak u32 imx_get_eqos_csr_clk(void)
830 {
831 	return 100 * 1000000;
832 }
833 __weak int imx_eqos_txclk_set_rate(unsigned long rate)
834 {
835 	return 0;
836 }
837 
838 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
839 {
840 	return imx_get_eqos_csr_clk();
841 }
842 
843 static int eqos_calibrate_pads_stm32(struct udevice *dev)
844 {
845 	return 0;
846 }
847 
848 static int eqos_calibrate_pads_imx(struct udevice *dev)
849 {
850 	return 0;
851 }
852 
853 static int eqos_disable_calibration_stm32(struct udevice *dev)
854 {
855 	return 0;
856 }
857 
858 static int eqos_disable_calibration_imx(struct udevice *dev)
859 {
860 	return 0;
861 }
862 
863 static int eqos_set_full_duplex(struct udevice *dev)
864 {
865 	struct eqos_priv *eqos = dev_get_priv(dev);
866 
867 	debug("%s(dev=%p):\n", __func__, dev);
868 
869 	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);
870 
871 	return 0;
872 }
873 
874 static int eqos_set_half_duplex(struct udevice *dev)
875 {
876 	struct eqos_priv *eqos = dev_get_priv(dev);
877 
878 	debug("%s(dev=%p):\n", __func__, dev);
879 
880 	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);
881 
882 	/* Workaround: flush the TX queue when switching to half-duplex */
883 	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
884 		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);
885 
886 	return 0;
887 }
888 
889 static int eqos_set_gmii_speed(struct udevice *dev)
890 {
891 	struct eqos_priv *eqos = dev_get_priv(dev);
892 
893 	debug("%s(dev=%p):\n", __func__, dev);
894 
895 	clrbits_le32(&eqos->mac_regs->configuration,
896 		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
897 
898 	return 0;
899 }
900 
901 static int eqos_set_mii_speed_100(struct udevice *dev)
902 {
903 	struct eqos_priv *eqos = dev_get_priv(dev);
904 
905 	debug("%s(dev=%p):\n", __func__, dev);
906 
907 	setbits_le32(&eqos->mac_regs->configuration,
908 		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
909 
910 	return 0;
911 }
912 
913 static int eqos_set_mii_speed_10(struct udevice *dev)
914 {
915 	struct eqos_priv *eqos = dev_get_priv(dev);
916 
917 	debug("%s(dev=%p):\n", __func__, dev);
918 
919 	clrsetbits_le32(&eqos->mac_regs->configuration,
920 			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);
921 
922 	return 0;
923 }
924 
925 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
926 {
927 #ifdef CONFIG_CLK
928 	struct eqos_priv *eqos = dev_get_priv(dev);
929 	ulong rate;
930 	int ret;
931 
932 	debug("%s(dev=%p):\n", __func__, dev);
933 
934 	switch (eqos->phy->speed) {
935 	case SPEED_1000:
936 		rate = 125 * 1000 * 1000;
937 		break;
938 	case SPEED_100:
939 		rate = 25 * 1000 * 1000;
940 		break;
941 	case SPEED_10:
942 		rate = 2.5 * 1000 * 1000;
943 		break;
944 	default:
945 		pr_err("invalid speed %d", eqos->phy->speed);
946 		return -EINVAL;
947 	}
948 
949 	ret = clk_set_rate(&eqos->clk_tx, rate);
950 	if (ret < 0) {
951 		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
952 		return ret;
953 	}
954 #endif
955 
956 	return 0;
957 }
958 
959 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
960 {
961 	return 0;
962 }
963 
964 static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
965 {
966 	struct eqos_priv *eqos = dev_get_priv(dev);
967 	ulong rate;
968 	int ret;
969 
970 	debug("%s(dev=%p):\n", __func__, dev);
971 
972 	switch (eqos->phy->speed) {
973 	case SPEED_1000:
974 		rate = 125 * 1000 * 1000;
975 		break;
976 	case SPEED_100:
977 		rate = 25 * 1000 * 1000;
978 		break;
979 	case SPEED_10:
980 		rate = 2.5 * 1000 * 1000;
981 		break;
982 	default:
983 		pr_err("invalid speed %d", eqos->phy->speed);
984 		return -EINVAL;
985 	}
986 
987 	ret = imx_eqos_txclk_set_rate(rate);
988 	if (ret < 0) {
989 		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
990 		return ret;
991 	}
992 
993 	return 0;
994 }
995 
996 static int eqos_adjust_link(struct udevice *dev)
997 {
998 	struct eqos_priv *eqos = dev_get_priv(dev);
999 	int ret;
1000 	bool en_calibration;
1001 
1002 	debug("%s(dev=%p):\n", __func__, dev);
1003 
1004 	if (eqos->phy->duplex)
1005 		ret = eqos_set_full_duplex(dev);
1006 	else
1007 		ret = eqos_set_half_duplex(dev);
1008 	if (ret < 0) {
1009 		pr_err("eqos_set_*_duplex() failed: %d", ret);
1010 		return ret;
1011 	}
1012 
1013 	switch (eqos->phy->speed) {
1014 	case SPEED_1000:
1015 		en_calibration = true;
1016 		ret = eqos_set_gmii_speed(dev);
1017 		break;
1018 	case SPEED_100:
1019 		en_calibration = true;
1020 		ret = eqos_set_mii_speed_100(dev);
1021 		break;
1022 	case SPEED_10:
1023 		en_calibration = false;
1024 		ret = eqos_set_mii_speed_10(dev);
1025 		break;
1026 	default:
1027 		pr_err("invalid speed %d", eqos->phy->speed);
1028 		return -EINVAL;
1029 	}
1030 	if (ret < 0) {
1031 		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
1032 		return ret;
1033 	}
1034 
1035 	if (en_calibration) {
1036 		ret = eqos->config->ops->eqos_calibrate_pads(dev);
1037 		if (ret < 0) {
1038 			pr_err("eqos_calibrate_pads() failed: %d",
1039 			       ret);
1040 			return ret;
1041 		}
1042 	} else {
1043 		ret = eqos->config->ops->eqos_disable_calibration(dev);
1044 		if (ret < 0) {
1045 			pr_err("eqos_disable_calibration() failed: %d",
1046 			       ret);
1047 			return ret;
1048 		}
1049 	}
1050 	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
1051 	if (ret < 0) {
1052 		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
1053 		return ret;
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 int eqos_write_hwaddr(struct udevice *dev)
1060 {
1061 	struct eth_pdata *plat = dev_get_platdata(dev);
1062 	struct eqos_priv *eqos = dev_get_priv(dev);
1063 	uint32_t val;
1064 
1065 	/*
1066 	 * This function may be called before start() or after stop(). At that
1067 	 * time, on at least some configurations of the EQoS HW, all clocks to
1068 	 * the EQoS HW block will be stopped, and a reset signal applied. If
1069 	 * any register access is attempted in this state, bus timeouts or CPU
1070 	 * hangs may occur. This check prevents that.
1071 	 *
1072 	 * A simple solution to this problem would be to not implement
1073 	 * write_hwaddr(), since start() always writes the MAC address into HW
1074 	 * anyway. However, it is desirable to implement write_hwaddr() to
1075 	 * support the case of SW that runs subsequent to U-Boot which expects
1076 	 * the MAC address to already be programmed into the EQoS registers,
1077 	 * which must happen irrespective of whether the U-Boot user (or
1078 	 * scripts) actually made use of the EQoS device, and hence
1079 	 * irrespective of whether start() was ever called.
1080 	 *
1081 	 * Note that this requirement by subsequent SW is not valid for
1082 	 * Tegra186, and is likely not valid for any non-PCI instantiation of
1083 	 * the EQoS HW block. This function is implemented solely as
1084 	 * future-proofing with the expectation the driver will eventually be
1085 	 * ported to some system where the expectation above is true.
1086 	 */
1087 	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
1088 		return 0;
1089 
1090 	/* Update the MAC address */
1091 	val = (plat->enetaddr[5] << 8) |
1092 		(plat->enetaddr[4]);
1093 	writel(val, &eqos->mac_regs->address0_high);
1094 	val = (plat->enetaddr[3] << 24) |
1095 		(plat->enetaddr[2] << 16) |
1096 		(plat->enetaddr[1] << 8) |
1097 		(plat->enetaddr[0]);
1098 	writel(val, &eqos->mac_regs->address0_low);
1099 
1100 	return 0;
1101 }
1102 
1103 static int eqos_read_rom_hwaddr(struct udevice *dev)
1104 {
1105 	struct eth_pdata *pdata = dev_get_platdata(dev);
1106 
1107 #ifdef CONFIG_ARCH_IMX8M
1108 	imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr);
1109 #endif
1110 	return !is_valid_ethaddr(pdata->enetaddr);
1111 }
1112 
1113 int eqos_init(struct udevice *dev)
1114 {
1115 	struct eqos_priv *eqos = dev_get_priv(dev);
1116 	int ret = 0, limit = 10;
1117 	ulong rate;
1118 	u32 val;
1119 
1120 	debug("%s(dev=%p):\n", __func__, dev);
1121 
1122 	if (eqos->config->ops->eqos_start_clks) {
1123 		ret = eqos->config->ops->eqos_start_clks(dev);
1124 		if (ret < 0) {
1125 			pr_err("eqos_start_clks() failed: %d", ret);
1126 			goto err;
1127 		}
1128 	}
1129 
1130 	if (!eqos->mii_reseted) {
1131 		ret = eqos->config->ops->eqos_start_resets(dev);
1132 		if (ret < 0) {
1133 			pr_err("eqos_start_resets() failed: %d", ret);
1134 			goto err_stop_clks;
1135 		}
1136 
1137 		eqos->mii_reseted = true;
1138 		udelay(10);
1139 	}
1140 
1141 	eqos->reg_access_ok = true;
1142 
1143 	/* DMA SW reset */
1144 	val = readl(&eqos->dma_regs->mode);
1145 	val |= EQOS_DMA_MODE_SWR;
1146 	writel(val, &eqos->dma_regs->mode);
1147 	while (limit--) {
1148 		if (!(readl(&eqos->dma_regs->mode) & EQOS_DMA_MODE_SWR))
1149 			break;
1150 		mdelay(10);
1151 	}
1152 
1153 	if (limit < 0) {
1154 		pr_err("EQOS_DMA_MODE_SWR stuck");
		ret = -EAGAIN;
1155 		goto err_stop_resets;
1156 	}
1157 
1158 	ret = eqos->config->ops->eqos_calibrate_pads(dev);
1159 	if (ret < 0) {
1160 		pr_err("eqos_calibrate_pads() failed: %d", ret);
1161 		goto err_stop_resets;
1162 	}
1163 	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);
1164 
1165 	val = (rate / 1000000) - 1;
1166 	writel(val, &eqos->mac_regs->us_tic_counter);
1167 
1168 	/*
1169 	 * if PHY was already connected and configured,
1170 	 * don't need to reconnect/reconfigure again
1171 	 */
1172 	if (!eqos->phy) {
1173 		int addr = -1;
1174 #ifdef CONFIG_DM_ETH_PHY
1175 		addr = eth_phy_get_addr(dev);
1176 #endif
1177 #ifdef DWC_NET_PHYADDR
1178 		addr = DWC_NET_PHYADDR;
1179 #endif
1180 		eqos->phy = phy_connect(eqos->mii, addr, dev,
1181 					eqos->config->ops->eqos_get_interface(dev));
1182 		if (!eqos->phy) {
1183 			pr_err("phy_connect() failed");
1184 			goto err_stop_resets;
1185 		}
1186 
1187 		if (eqos->max_speed) {
1188 			ret = phy_set_supported(eqos->phy, eqos->max_speed);
1189 			if (ret) {
1190 				pr_err("phy_set_supported() failed: %d", ret);
1191 				goto err_shutdown_phy;
1192 			}
1193 		}
1194 
1195 		ret = phy_config(eqos->phy);
1196 		if (ret < 0) {
1197 			pr_err("phy_config() failed: %d", ret);
1198 			goto err_shutdown_phy;
1199 		}
1200 	}
1201 
1202 	ret = phy_startup(eqos->phy);
1203 	if (ret < 0) {
1204 		pr_err("phy_startup() failed: %d", ret);
1205 		goto err_shutdown_phy;
1206 	}
1207 
1208 	if (!eqos->phy->link) {
1209 		pr_err("No link");
1210 		goto err_shutdown_phy;
1211 	}
1212 
1213 	ret = eqos_adjust_link(dev);
1214 	if (ret < 0) {
1215 		pr_err("eqos_adjust_link() failed: %d", ret);
1216 		goto err_shutdown_phy;
1217 	}
1218 
1219 	debug("%s: OK\n", __func__);
1220 	return 0;
1221 
1222 err_shutdown_phy:
1223 	phy_shutdown(eqos->phy);
1224 err_stop_resets:
1225 	eqos->config->ops->eqos_stop_resets(dev);
1226 	eqos->mii_reseted = false;
1227 err_stop_clks:
1228 	if (eqos->config->ops->eqos_stop_clks)
1229 		eqos->config->ops->eqos_stop_clks(dev);
1230 err:
1231 	pr_err("FAILED: %d", ret);
1232 	return ret;
1233 }
1234 
1235 void eqos_enable(struct udevice *dev)
1236 {
1237 	struct eqos_priv *eqos = dev_get_priv(dev);
1238 	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
1239 	ulong last_rx_desc;
1240 	int i;
1241 
1242 	eqos->tx_desc_idx = 0;
1243 	eqos->rx_desc_idx = 0;
1244 
1245 	/* Configure MTL */
1246 	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);
1247 
1248 	/* Enable Store and Forward mode for TX */
1249 	/* Program Tx operating mode */
1250 	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
1251 		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
1252 		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
1253 		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));
1254 
1255 	/* Transmit Queue weight */
1256 	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);
1257 
1258 	/* Enable Store and Forward mode for RX; jumbo frames are not used */
1259 	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
1260 		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
1261 		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
1262 		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);
1263 
1264 	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
1265 	val = readl(&eqos->mac_regs->hw_feature1);
1266 	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
1267 		EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
1268 	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
1269 		EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;
1270 
1271 	/*
1272 	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
1273 	 * r/tqs is encoded as (n / 256) - 1.
1274 	 */
1275 	tqs = (128 << tx_fifo_sz) / 256 - 1;
1276 	rqs = (128 << rx_fifo_sz) / 256 - 1;
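	/*
	 * Worked example (illustrative): a hw_feature1 FIFO-size field of 7
	 * encodes 128 << 7 = 16384 bytes, so tqs/rqs = 16384 / 256 - 1 = 63.
	 */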
1277 
1278 	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
1279 			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
1280 			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
1281 			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
1282 	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
1283 			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
1284 			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
1285 			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);
1286 
1287 	/* Flow control used only if each channel gets 4KB or more FIFO */
1288 	if (rqs >= ((4096 / 256) - 1)) {
1289 		u32 rfd, rfa;
1290 
1291 		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
1292 			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);
1293 
1294 		/*
1295 		 * Set Threshold for Activating Flow Control to space for a
1296 		 * minimum of 1 frame, i.e., (1500 * 1) = 1500 bytes.
1297 		 *
1298 		 * Set Threshold for Deactivating Flow Control to space for a
1299 		 * min of 1 frame (frame size 1500 bytes) in the receive FIFO
1300 		 */
1301 		if (rqs == ((4096 / 256) - 1)) {
1302 			/*
1303 			 * This violates the above formula because of the FIFO
1304 			 * size limit; overflow may therefore occur in spite of it.
1305 			 */
1306 			rfd = 0x3;	/* Full-3K */
1307 			rfa = 0x1;	/* Full-1.5K */
1308 		} else if (rqs == ((8192 / 256) - 1)) {
1309 			rfd = 0x6;	/* Full-4K */
1310 			rfa = 0xa;	/* Full-6K */
1311 		} else if (rqs == ((16384 / 256) - 1)) {
1312 			rfd = 0x6;	/* Full-4K */
1313 			rfa = 0x12;	/* Full-10K */
1314 		} else {
1315 			rfd = 0x6;	/* Full-4K */
1316 			rfa = 0x1E;	/* Full-16K */
1317 		}
1318 
1319 		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
1320 				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
1321 				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
1322 				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
1323 				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
1324 				(rfd <<
1325 				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
1326 				(rfa <<
1327 				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
1328 	}
1329 
1330 	/* Configure MAC */
1331 
1332 	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
1333 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
1334 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
1335 			eqos->config->config_mac <<
1336 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);
1337 
1338 	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
1339 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
1340 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
1341 			0x2 <<
1342 			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);
1343 
1344 	/* Multicast and Broadcast Queue Enable */
1345 	setbits_le32(&eqos->mac_regs->unused_0a4,
1346 		     0x00100000);
1347 	/* Enable promiscuous mode */
1348 	setbits_le32(&eqos->mac_regs->unused_004[1],
1349 		     0x1);
1350 
1351 	/* Set TX flow control parameters */
1352 	/* Set Pause Time */
1353 	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
1354 		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
1355 	/* Assign priority for TX flow control */
1356 	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
1357 		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
1358 		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
1359 	/* Assign priority for RX flow control */
1360 	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
1361 		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
1362 		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
1363 	/* Enable flow control */
1364 	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
1365 		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
1366 	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
1367 		     EQOS_MAC_RX_FLOW_CTRL_RFE);
1368 
1369 	clrsetbits_le32(&eqos->mac_regs->configuration,
1370 			EQOS_MAC_CONFIGURATION_GPSLCE |
1371 			EQOS_MAC_CONFIGURATION_WD |
1372 			EQOS_MAC_CONFIGURATION_JD |
1373 			EQOS_MAC_CONFIGURATION_JE,
1374 			EQOS_MAC_CONFIGURATION_CST |
1375 			EQOS_MAC_CONFIGURATION_ACS);
1376 
1377 	eqos_write_hwaddr(dev);
1378 
1379 	/* Configure DMA */
1380 
1381 	/* Enable OSP mode */
1382 	setbits_le32(&eqos->dma_regs->ch0_tx_control,
1383 		     EQOS_DMA_CH0_TX_CONTROL_OSP);
1384 
1385 	/* RX buffer size. Must be a multiple of bus width */
1386 	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
1387 			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
1388 			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
1389 			EQOS_MAX_PACKET_SIZE <<
1390 			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);
1391 
1392 	setbits_le32(&eqos->dma_regs->ch0_control,
1393 		     EQOS_DMA_CH0_CONTROL_PBLX8);
1394 
1395 	/*
1396 	 * Burst length must be < 1/2 FIFO size.
1397 	 * FIFO size in tqs is encoded as (n / 256) - 1.
1398 	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
1399 	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
1400 	 */
1401 	pbl = tqs + 1;
1402 	if (pbl > 32)
1403 		pbl = 32;
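	/*
	 * Worked example (illustrative): tqs = 63 (a 16 KiB FIFO) gives
	 * pbl = 64, capped to 32, i.e. bursts of at most
	 * 32 * 8 (PBLX8) * 16 (AXI width) = 4096 bytes, under half the FIFO.
	 */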
1404 	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
1405 			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
1406 			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
1407 			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);
1408 
1409 	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
1410 			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
1411 			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
1412 			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);
1413 
1414 	/* DMA performance configuration */
1415 	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
1416 		EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
1417 		EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
1418 	writel(val, &eqos->dma_regs->sysbus_mode);
1419 
1420 	/* Set up descriptors */
1421 
1422 	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
1423 	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
1424 		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);
1425 		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
1426 					     (i * EQOS_MAX_PACKET_SIZE));
1427 		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
1428 		mb();
1429 		eqos->config->ops->eqos_flush_desc(rx_desc);
1430 		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
1431 						(i * EQOS_MAX_PACKET_SIZE),
1432 						EQOS_MAX_PACKET_SIZE);
1433 	}
1434 
1435 	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
1436 	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
1437 	writel(EQOS_DESCRIPTORS_TX - 1,
1438 	       &eqos->dma_regs->ch0_txdesc_ring_length);
1439 
1440 	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
1441 	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
1442 	writel(EQOS_DESCRIPTORS_RX - 1,
1443 	       &eqos->dma_regs->ch0_rxdesc_ring_length);
1444 
1445 	/* Enable everything */
1446 	setbits_le32(&eqos->dma_regs->ch0_tx_control,
1447 		     EQOS_DMA_CH0_TX_CONTROL_ST);
1448 	setbits_le32(&eqos->dma_regs->ch0_rx_control,
1449 		     EQOS_DMA_CH0_RX_CONTROL_SR);
1450 	setbits_le32(&eqos->mac_regs->configuration,
1451 		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);
1452 
1453 	/* TX tail pointer not written until we need to TX a packet */
1454 	/*
1455 	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
1456 	 * first descriptor, implying all descriptors were available. However,
1457 	 * that's not distinguishable from none of the descriptors being
1458 	 * available.
1459 	 */
1460 	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
1461 	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
1462 
1463 	eqos->started = true;
1464 }
1465 
1466 static int eqos_start(struct udevice *dev)
1467 {
1468 	int ret;
1469 
1470 	ret = eqos_init(dev);
1471 	if (ret)
1472 		return ret;
1473 
1474 	eqos_enable(dev);
1475 
1476 	return 0;
1477 }
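
/*
 * Hedged sketch of how these callbacks are typically wired into the
 * U-Boot ethernet uclass (struct eth_ops from include/net.h); the member
 * names are real, the instance shown is illustrative:
 *
 *	static const struct eth_ops eqos_ops = {
 *		.start           = eqos_start,
 *		.stop            = eqos_stop,
 *		.send            = eqos_send,
 *		.recv            = eqos_recv,
 *		.free_pkt        = eqos_free_pkt,
 *		.write_hwaddr    = eqos_write_hwaddr,
 *		.read_rom_hwaddr = eqos_read_rom_hwaddr,
 *	};
 */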
1478 
1479 void eqos_stop(struct udevice *dev)
1480 {
1481 	struct eqos_priv *eqos = dev_get_priv(dev);
1482 	int i;
1483 
1484 	debug("%s(dev=%p):\n", __func__, dev);
1485 
1486 	if (!eqos->started)
1487 		return;
1488 	eqos->started = false;
1489 	eqos->reg_access_ok = false;
1490 
1491 	/* Disable TX DMA */
1492 	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
1493 		     EQOS_DMA_CH0_TX_CONTROL_ST);
1494 
1495 	/* Wait for all TX packets to drain out of the MTL */
1496 	for (i = 0; i < 1000000; i++) {
1497 		u32 val = readl(&eqos->mtl_regs->txq0_debug);
1498 		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
1499 			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
1500 		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
1501 		if ((trcsts != 1) && (!txqsts))
1502 			break;
1503 	}
1504 
1505 	/* Turn off MAC TX and RX */
1506 	clrbits_le32(&eqos->mac_regs->configuration,
1507 		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);
1508 
1509 	/* Wait for all RX packets to drain out of MTL */
1510 	for (i = 0; i < 1000000; i++) {
1511 		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
1512 		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
1513 			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
1514 		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
1515 			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
1516 		if ((!prxq) && (!rxqsts))
1517 			break;
1518 	}
1519 
1520 	/* Turn off RX DMA */
1521 	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
1522 		     EQOS_DMA_CH0_RX_CONTROL_SR);
1523 
1524 	if (eqos->phy)
1525 		phy_shutdown(eqos->phy);
1527 	if (eqos->config->ops->eqos_stop_clks)
1528 		eqos->config->ops->eqos_stop_clks(dev);
1529 
1530 	debug("%s: OK\n", __func__);
1531 }
1532 
1533 int eqos_send(struct udevice *dev, void *packet, int length)
1534 {
1535 	struct eqos_priv *eqos = dev_get_priv(dev);
1536 	struct eqos_desc *tx_desc;
1537 	int i;
1538 
1539 	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
1540 	      length);
1541 
1542 	memcpy(eqos->tx_dma_buf, packet, length);
1543 	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);
1544 
1545 	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
1546 	eqos->tx_desc_idx++;
1547 	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;
1548 
1549 	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
1550 	tx_desc->des1 = 0;
1551 	tx_desc->des2 = length;
1552 	/*
1553 	 * Make sure that if HW sees the _OWN write below, it will see all the
1554 	 * writes to the rest of the descriptor too.
1555 	 */
1556 	mb();
1557 	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
1558 	eqos->config->ops->eqos_flush_desc(tx_desc);
1559 
1560 	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
1561 		&eqos->dma_regs->ch0_txdesc_tail_pointer);
1562 
1563 	for (i = 0; i < 1000000; i++) {
1564 		eqos->config->ops->eqos_inval_desc(tx_desc);
1565 		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
1566 			return 0;
1567 		udelay(1);
1568 	}
1569 
1570 	debug("%s: TX timeout\n", __func__);
1571 
1572 	return -ETIMEDOUT;
1573 }
1574 
1575 int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
1576 {
1577 	struct eqos_priv *eqos = dev_get_priv(dev);
1578 	struct eqos_desc *rx_desc;
1579 	int length;
1580 
1581 	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);
1582 
1583 	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
1584 	eqos->config->ops->eqos_inval_desc(rx_desc);
1585 	if (rx_desc->des3 & EQOS_DESC3_OWN) {
1586 		debug("%s: RX packet not available\n", __func__);
1587 		return -EAGAIN;
1588 	}
1589 
1590 	*packetp = eqos->rx_dma_buf +
1591 		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
1592 	length = rx_desc->des3 & 0x7fff;
1593 	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);
1594 
1595 	eqos->config->ops->eqos_inval_buffer(*packetp, length);
1596 
1597 	return length;
1598 }
1599 
1600 int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
1601 {
1602 	struct eqos_priv *eqos = dev_get_priv(dev);
1603 	uchar *packet_expected;
1604 	struct eqos_desc *rx_desc;
1605 
1606 	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);
1607 
1608 	packet_expected = eqos->rx_dma_buf +
1609 		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
1610 	if (packet != packet_expected) {
1611 		debug("%s: Unexpected packet (expected %p)\n", __func__,
1612 		      packet_expected);
1613 		return -EINVAL;
1614 	}
1615 
1616 	eqos->config->ops->eqos_inval_buffer(packet, length);
1617 
1618 	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
1619 
1620 	rx_desc->des0 = 0;
1621 	mb();
1622 	eqos->config->ops->eqos_flush_desc(rx_desc);
1623 	eqos->config->ops->eqos_inval_buffer(packet, length);
1624 	rx_desc->des0 = (u32)(ulong)packet;
1625 	rx_desc->des1 = 0;
1626 	rx_desc->des2 = 0;
1627 	/*
1628 	 * Make sure that if HW sees the _OWN write below, it will see all the
1629 	 * writes to the rest of the descriptor too.
1630 	 */
1631 	mb();
1632 	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
1633 	eqos->config->ops->eqos_flush_desc(rx_desc);
1634 
1635 	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
1636 
1637 	eqos->rx_desc_idx++;
1638 	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;
1639 
1640 	return 0;
1641 }
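
/*
 * Hedged usage sketch: the ethernet uclass drives eqos_recv() and
 * eqos_free_pkt() as a pair, roughly (simplified from net/eth-uclass.c):
 *
 *	len = ops->recv(dev, flags, &pkt);              // eqos_recv()
 *	if (len > 0)
 *		net_process_received_packet(pkt, len);
 *	if (len >= 0 && ops->free_pkt)
 *		ops->free_pkt(dev, pkt, len);           // requeue descriptor
 *
 * so each RX buffer is handed back to the DMA ring before the next
 * descriptor index is polled.
 */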
1642 
1643 static int eqos_probe_resources_core(struct udevice *dev)
1644 {
1645 	struct eqos_priv *eqos = dev_get_priv(dev);
1646 	int ret;
1647 
1648 	debug("%s(dev=%p):\n", __func__, dev);
1649 
1650 	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
1651 				       EQOS_DESCRIPTORS_RX);
1652 	if (!eqos->descs) {
1653 		debug("%s: eqos_alloc_descs() failed\n", __func__);
1654 		ret = -ENOMEM;
1655 		goto err;
1656 	}
1657 	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
1658 	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
1659 	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
1660 	      eqos->rx_descs);
1661 
1662 	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
1663 	if (!eqos->tx_dma_buf) {
1664 		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
1665 		ret = -ENOMEM;
1666 		goto err_free_descs;
1667 	}
1668 	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);
1669 
1670 	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
1671 	if (!eqos->rx_dma_buf) {
1672 		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
1673 		ret = -ENOMEM;
1674 		goto err_free_tx_dma_buf;
1675 	}
1676 	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);
1677 
1678 	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
1679 	if (!eqos->rx_pkt) {
1680 		debug("%s: malloc(rx_pkt) failed\n", __func__);
1681 		ret = -ENOMEM;
1682 		goto err_free_rx_dma_buf;
1683 	}
1684 	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);
1685 
1686 	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
1687 			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);
1688 
1689 	debug("%s: OK\n", __func__);
1690 	return 0;
1691 
1692 err_free_rx_dma_buf:
1693 	free(eqos->rx_dma_buf);
1694 err_free_tx_dma_buf:
1695 	free(eqos->tx_dma_buf);
1696 err_free_descs:
1697 	eqos_free_descs(eqos->descs);
1698 err:
1699 
1700 	debug("%s: returns %d\n", __func__, ret);
1701 	return ret;
1702 }
1703 
1704 static int eqos_remove_resources_core(struct udevice *dev)
1705 {
1706 	struct eqos_priv *eqos = dev_get_priv(dev);
1707 
1708 	debug("%s(dev=%p):\n", __func__, dev);
1709 
1710 	free(eqos->rx_pkt);
1711 	free(eqos->rx_dma_buf);
1712 	free(eqos->tx_dma_buf);
1713 	eqos_free_descs(eqos->descs);
1714 
1715 	debug("%s: OK\n", __func__);
1716 	return 0;
1717 }
1718 
1719 static int eqos_probe_resources_tegra186(struct udevice *dev)
1720 {
1721 	struct eqos_priv *eqos = dev_get_priv(dev);
1722 	int ret;
1723 
1724 	debug("%s(dev=%p):\n", __func__, dev);
1725 
1726 	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
1727 	if (ret) {
1728 		pr_err("reset_get_by_name(rst) failed: %d", ret);
1729 		return ret;
1730 	}
1731 
1732 	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
1733 				   &eqos->phy_reset_gpio,
1734 				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
1735 	if (ret) {
1736 		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
1737 		goto err_free_reset_eqos;
1738 	}
1739 
1740 	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
1741 	if (ret) {
1742 		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
1743 		goto err_free_gpio_phy_reset;
1744 	}
1745 
1746 	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
1747 	if (ret) {
1748 		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
1749 		goto err_free_clk_slave_bus;
1750 	}
1751 
1752 	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
1753 	if (ret) {
1754 		pr_err("clk_get_by_name(rx) failed: %d", ret);
1755 		goto err_free_clk_master_bus;
1756 	}
1757 
1758 	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
1759 	if (ret) {
1760 		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
1761 		goto err_free_clk_rx;
1763 	}
1764 
1765 	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
1766 	if (ret) {
1767 		pr_err("clk_get_by_name(tx) failed: %d", ret);
1768 		goto err_free_clk_ptp_ref;
1769 	}
1770 
1771 	debug("%s: OK\n", __func__);
1772 	return 0;
1773 
1774 err_free_clk_ptp_ref:
1775 	clk_free(&eqos->clk_ptp_ref);
1776 err_free_clk_rx:
1777 	clk_free(&eqos->clk_rx);
1778 err_free_clk_master_bus:
1779 	clk_free(&eqos->clk_master_bus);
1780 err_free_clk_slave_bus:
1781 	clk_free(&eqos->clk_slave_bus);
1782 err_free_gpio_phy_reset:
1783 	dm_gpio_free(dev, &eqos->phy_reset_gpio);
1784 err_free_reset_eqos:
1785 	reset_free(&eqos->reset_ctl);
1786 
1787 	debug("%s: returns %d\n", __func__, ret);
1788 	return ret;
1789 }
1790 
1791 /* Board-specific Ethernet interface initialization. */
1792 __weak int board_interface_eth_init(struct udevice *dev,
1793 				    phy_interface_t interface_type)
1794 {
1795 	return 0;
1796 }
1797 
1798 static int eqos_probe_resources_stm32(struct udevice *dev)
1799 {
1800 	struct eqos_priv *eqos = dev_get_priv(dev);
1801 	int ret;
1802 	phy_interface_t interface;
1803 	struct ofnode_phandle_args phandle_args;
1804 
1805 	debug("%s(dev=%p):\n", __func__, dev);
1806 
1807 	interface = eqos->config->ops->eqos_get_interface(dev);
1808 
1809 	if (interface == PHY_INTERFACE_MODE_NONE) {
1810 		pr_err("Invalid PHY interface\n");
1811 		return -EINVAL;
1812 	}
1813 
1814 	ret = board_interface_eth_init(dev, interface);
1815 	if (ret)
1816 		return -EINVAL;
1817 
1818 	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);
1819 
1820 	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
1821 	if (ret) {
1822 		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
1823 		return ret;
1824 	}
1825 
1826 	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
1827 	if (ret)
1828 		pr_warn("clk_get_by_name(rx) failed: %d", ret);
1829 
1830 	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
1831 	if (ret)
1832 		pr_warn("clk_get_by_name(mac-clk-tx) failed: %d", ret);
1833 
1834 	/* Get ETH_CLK clock (optional) */
1835 	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
1836 	if (ret)
1837 		pr_warn("No PHY clock provided: %d", ret);
1838 
1839 	eqos->phyaddr = -1;
1840 	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
1841 					 &phandle_args);
1842 	if (!ret) {
1843 		/* search "reset-gpios" in phy node */
1844 		ret = gpio_request_by_name_nodev(phandle_args.node,
1845 						 "reset-gpios", 0,
1846 						 &eqos->phy_reset_gpio,
1847 						 GPIOD_IS_OUT |
1848 						 GPIOD_IS_OUT_ACTIVE);
1849 		if (ret)
1850 			pr_warn("PHY reset-gpios not provided: %d", ret);
1852 		else
1853 			eqos->reset_delays[1] = 2;
1854 
1855 		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
1856 							"reg", -1);
1857 	}
1858 
1859 	if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
1860 		int reset_flags = GPIOD_IS_OUT;
1861 
1862 		if (dev_read_bool(dev, "snps,reset-active-low"))
1863 			reset_flags |= GPIOD_ACTIVE_LOW;
1864 
1865 		ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
1866 					   &eqos->phy_reset_gpio, reset_flags);
1867 		if (ret == 0)
1868 			ret = dev_read_u32_array(dev, "snps,reset-delays-us",
1869 						 eqos->reset_delays, 3);
1870 		else
1871 			pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d",
1872 				ret);
1873 	}
1874 
1875 	debug("%s: OK\n", __func__);
1876 	return 0;
1877 }
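
/*
 * An illustrative (not board-verified) node exercising the properties
 * handled above; only the "stmmaceth" clock is mandatory:
 *
 *	ethernet {
 *		compatible = "snps,dwmac-4.20a";
 *		clock-names = "stmmaceth", "mac-clk-rx", "mac-clk-tx", "eth-ck";
 *		phy-mode = "rmii";
 *		max-speed = <100>;
 *		snps,reset-gpio = <&gpioa 0 0>;
 *		snps,reset-active-low;
 *		snps,reset-delays-us = <0 10000 30000>;
 *	};
 *
 * When a "phy-handle" is present, the PHY node's "reset-gpios" and "reg"
 * properties take precedence over the "snps," fallbacks.
 */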
1878 
1879 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
1880 {
1881 	const char *phy_mode;
1882 	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1883 
1884 	debug("%s(dev=%p):\n", __func__, dev);
1885 
1886 	phy_mode = dev_read_string(dev, "phy-mode");
1887 	if (phy_mode)
1888 		interface = phy_get_interface_by_name(phy_mode);
1889 
1890 	return interface;
1891 }
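
/*
 * The "phy-mode" string (e.g. "rmii" or "rgmii") is translated by
 * phy_get_interface_by_name(); if the property is absent, the initial
 * PHY_INTERFACE_MODE_NONE is returned and the caller rejects it.
 */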
1892 
1893 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
1894 {
1895 	return PHY_INTERFACE_MODE_MII;
1896 }
1897 
1898 static int eqos_probe_resources_imx(struct udevice *dev)
1899 {
1900 	struct eqos_priv *eqos = dev_get_priv(dev);
1901 	phy_interface_t interface;
1902 
1903 	debug("%s(dev=%p):\n", __func__, dev);
1904 
1905 	interface = eqos->config->ops->eqos_get_interface(dev);
1906 
1907 	if (interface == PHY_INTERFACE_MODE_NONE) {
1908 		pr_err("Invalid PHY interface\n");
1909 		return -EINVAL;
1910 	}
1911 
1912 	debug("%s: OK\n", __func__);
1913 	return 0;
1914 }
1915 
1916 static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
1917 {
1918 	const char *phy_mode;
1919 	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1920 
1921 	debug("%s(dev=%p):\n", __func__, dev);
1922 
1923 	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
1924 			       NULL);
1925 	if (phy_mode)
1926 		interface = phy_get_interface_by_name(phy_mode);
1927 
1928 	return interface;
1929 }
1930 
1931 static int eqos_remove_resources_tegra186(struct udevice *dev)
1932 {
1933 	struct eqos_priv *eqos = dev_get_priv(dev);
1934 
1935 	debug("%s(dev=%p):\n", __func__, dev);
1936 
1937 #ifdef CONFIG_CLK
1938 	clk_free(&eqos->clk_tx);
1939 	clk_free(&eqos->clk_ptp_ref);
1940 	clk_free(&eqos->clk_rx);
1941 	clk_free(&eqos->clk_slave_bus);
1942 	clk_free(&eqos->clk_master_bus);
1943 #endif
1944 	dm_gpio_free(dev, &eqos->phy_reset_gpio);
1945 	reset_free(&eqos->reset_ctl);
1946 
1947 	debug("%s: OK\n", __func__);
1948 	return 0;
1949 }
1950 
1951 static int eqos_remove_resources_stm32(struct udevice *dev)
1952 {
1953 	struct eqos_priv *eqos = dev_get_priv(dev);
1954 
1955 	debug("%s(dev=%p):\n", __func__, dev);
1956 
1957 #ifdef CONFIG_CLK
1958 	if (clk_valid(&eqos->clk_tx))
1959 		clk_free(&eqos->clk_tx);
1960 	if (clk_valid(&eqos->clk_rx))
1961 		clk_free(&eqos->clk_rx);
1962 	clk_free(&eqos->clk_master_bus);
1963 	if (clk_valid(&eqos->clk_ck))
1964 		clk_free(&eqos->clk_ck);
1965 #endif
1966 
1967 	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
1968 		dm_gpio_free(dev, &eqos->phy_reset_gpio);
1969 
1970 	debug("%s: OK\n", __func__);
1971 	return 0;
1972 }
1973 
1974 static int eqos_remove_resources_imx(struct udevice *dev)
1975 {
1976 	return 0;
1977 }
1978 
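/*
 * Common probe: map the MAC/MTL/DMA (and tegra186 wrapper) register
 * blocks, allocate the core descriptor/buffer resources, acquire the
 * configuration-specific resources, then register an MDIO bus whose
 * read/write ops drive this MAC's MDIO registers. Each step unwinds the
 * previous ones on failure.
 */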
1979 int eqos_probe(struct udevice *dev)
1980 {
1981 	struct eqos_priv *eqos = dev_get_priv(dev);
1982 	int ret;
1983 
1984 	debug("%s(dev=%p):\n", __func__, dev);
1985 
1986 	eqos->dev = dev;
1987 	eqos->config = (void *)dev_get_driver_data(dev);
1988 
1989 	eqos->regs = dev_read_addr(dev);
1990 	if (eqos->regs == FDT_ADDR_T_NONE) {
1991 		pr_err("dev_read_addr() failed");
1992 		return -ENODEV;
1993 	}
1994 	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
1995 	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
1996 	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
1997 	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);
1998 
1999 	ret = eqos_probe_resources_core(dev);
2000 	if (ret < 0) {
2001 		pr_err("eqos_probe_resources_core() failed: %d", ret);
2002 		return ret;
2003 	}
2004 
2005 	ret = eqos->config->ops->eqos_probe_resources(dev);
2006 	if (ret < 0) {
2007 		pr_err("eqos_probe_resources() failed: %d", ret);
2008 		goto err_remove_resources_core;
2009 	}
2010 
2011 #ifdef CONFIG_DM_ETH_PHY
2012 	eqos->mii = eth_phy_get_mdio_bus(dev);
2013 #endif
2014 	if (!eqos->mii) {
2015 		eqos->mii = mdio_alloc();
2016 		if (!eqos->mii) {
2017 			pr_err("mdio_alloc() failed");
2018 			ret = -ENOMEM;
2019 			goto err_remove_resources_config;
2020 		}
2021 		eqos->mii->read = eqos_mdio_read;
2022 		eqos->mii->write = eqos_mdio_write;
2023 		eqos->mii->priv = eqos;
2024 		strcpy(eqos->mii->name, dev->name);
2025 
2026 		ret = mdio_register(eqos->mii);
2027 		if (ret < 0) {
2028 			pr_err("mdio_register() failed: %d", ret);
2029 			goto err_free_mdio;
2030 		}
2031 	}
2032 
2033 #ifdef CONFIG_DM_ETH_PHY
2034 	eth_phy_set_mdio_bus(dev, eqos->mii);
2035 #endif
2036 
2037 	debug("%s: OK\n", __func__);
2038 	return 0;
2039 
2040 err_free_mdio:
2041 	mdio_free(eqos->mii);
2042 err_remove_resources_config:
2043 	eqos->config->ops->eqos_remove_resources(dev);
2044 err_remove_resources_core:
2045 	eqos_remove_resources_core(dev);
2046 
2047 	debug("%s: returns %d\n", __func__, ret);
2048 	return ret;
2049 }
2050 
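/*
 * Mirror of eqos_probe(): unregister and free the MDIO bus, then release
 * the configuration-specific and core resources.
 */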
2051 static int eqos_remove(struct udevice *dev)
2052 {
2053 	struct eqos_priv *eqos = dev_get_priv(dev);
2054 
2055 	debug("%s(dev=%p):\n", __func__, dev);
2056 
2057 	mdio_unregister(eqos->mii);
2058 	mdio_free(eqos->mii);
2059 	eqos->config->ops->eqos_remove_resources(dev);
2060 
2061 	eqos_remove_resources_core(dev);
2062 
2063 	debug("%s: OK\n", __func__);
2064 	return 0;
2065 }
2066 
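/* U-Boot ethernet uclass callbacks, implemented earlier in this file. */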
2067 static const struct eth_ops eqos_ops = {
2068 	.start = eqos_start,
2069 	.stop = eqos_stop,
2070 	.send = eqos_send,
2071 	.recv = eqos_recv,
2072 	.free_pkt = eqos_free_pkt,
2073 	.write_hwaddr = eqos_write_hwaddr,
2074 	.read_rom_hwaddr = eqos_read_rom_hwaddr,
2075 };
2076 
2077 static struct eqos_ops eqos_tegra186_ops = {
2078 	.eqos_inval_desc = eqos_inval_desc_tegra186,
2079 	.eqos_flush_desc = eqos_flush_desc_tegra186,
2080 	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
2081 	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
2082 	.eqos_probe_resources = eqos_probe_resources_tegra186,
2083 	.eqos_remove_resources = eqos_remove_resources_tegra186,
2084 	.eqos_stop_resets = eqos_stop_resets_tegra186,
2085 	.eqos_start_resets = eqos_start_resets_tegra186,
2086 	.eqos_stop_clks = eqos_stop_clks_tegra186,
2087 	.eqos_start_clks = eqos_start_clks_tegra186,
2088 	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
2089 	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
2090 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
2091 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186,
2092 	.eqos_get_interface = eqos_get_interface_tegra186
2093 };
2094 
2095 static const struct eqos_config eqos_tegra186_config = {
2096 	.reg_access_always_ok = false,
2097 	.mdio_wait = 10,
2098 	.swr_wait = 10,
2099 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
2100 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
2101 	.ops = &eqos_tegra186_ops
2102 };
2103 
2104 static struct eqos_ops eqos_stm32_ops = {
2105 	.eqos_inval_desc = eqos_inval_desc_generic,
2106 	.eqos_flush_desc = eqos_flush_desc_generic,
2107 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2108 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2109 	.eqos_probe_resources = eqos_probe_resources_stm32,
2110 	.eqos_remove_resources = eqos_remove_resources_stm32,
2111 	.eqos_stop_resets = eqos_stop_resets_stm32,
2112 	.eqos_start_resets = eqos_start_resets_stm32,
2113 	.eqos_stop_clks = eqos_stop_clks_stm32,
2114 	.eqos_start_clks = eqos_start_clks_stm32,
2115 	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
2116 	.eqos_disable_calibration = eqos_disable_calibration_stm32,
2117 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
2118 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
2119 	.eqos_get_interface = eqos_get_interface_stm32
2120 };
2121 
2122 static const struct eqos_config eqos_stm32_config = {
2123 	.reg_access_always_ok = false,
2124 	.mdio_wait = 10000,
2125 	.swr_wait = 50,
2126 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
2127 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
2128 	.ops = &eqos_stm32_ops
2129 };
2130 
2131 static struct eqos_ops eqos_imx_ops = {
2132 	.eqos_inval_desc = eqos_inval_desc_generic,
2133 	.eqos_flush_desc = eqos_flush_desc_generic,
2134 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2135 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2136 	.eqos_probe_resources = eqos_probe_resources_imx,
2137 	.eqos_remove_resources = eqos_remove_resources_imx,
2138 	.eqos_stop_resets = eqos_stop_resets_imx,
2139 	.eqos_start_resets = eqos_start_resets_imx,
2140 	.eqos_stop_clks = eqos_stop_clks_imx,
2141 	.eqos_start_clks = eqos_start_clks_imx,
2142 	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
2143 	.eqos_disable_calibration = eqos_disable_calibration_imx,
2144 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
2145 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx,
2146 	.eqos_get_interface = eqos_get_interface_imx
2147 };
2148 
2149 struct eqos_config eqos_imx_config = {
2150 	.reg_access_always_ok = false,
2151 	.mdio_wait = 10000,
2152 	.swr_wait = 50,
2153 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
2154 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
2155 	.ops = &eqos_imx_ops
2156 };
2157 
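/*
 * The Rockchip configuration reuses the generic cache-maintenance helpers
 * and the stm32 resource/reset hooks wholesale. The table is non-static,
 * presumably so SoC-specific glue code elsewhere can reference it.
 */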
2158 struct eqos_ops eqos_rockchip_ops = {
2159 	.eqos_inval_desc = eqos_inval_desc_generic,
2160 	.eqos_flush_desc = eqos_flush_desc_generic,
2161 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2162 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2163 	.eqos_probe_resources = eqos_probe_resources_stm32,
2164 	.eqos_remove_resources = eqos_remove_resources_stm32,
2165 	.eqos_stop_resets = eqos_stop_resets_stm32,
2166 	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
2167 	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
2168 	.eqos_disable_calibration = eqos_disable_calibration_stm32,
2169 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
2170 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
2171 	.eqos_get_interface = eqos_get_interface_stm32
2172 };
2173 
2174 static const struct udevice_id eqos_ids[] = {
2175 	{
2176 		.compatible = "nvidia,tegra186-eqos",
2177 		.data = (ulong)&eqos_tegra186_config
2178 	},
2179 	{
2180 		.compatible = "snps,dwmac-4.20a",
2181 		.data = (ulong)&eqos_stm32_config
2182 	},
2183 	{
2184 		.compatible = "fsl,imx-eqos",
2185 		.data = (ulong)&eqos_imx_config
2186 	},
2188 	{ }
2189 };
2190 
2191 U_BOOT_DRIVER(eth_eqos) = {
2192 	.name = "eth_eqos",
2193 	.id = UCLASS_ETH,
2194 	.of_match = of_match_ptr(eqos_ids),
2195 	.probe = eqos_probe,
2196 	.remove = eqos_remove,
2197 	.ops = &eqos_ops,
2198 	.priv_auto_alloc_size = sizeof(struct eqos_priv),
2199 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
2200 };
2201