/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */
#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include "dwc_eth_qos.h"

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, and that flush may discard descriptors that the
 * hardware has written. Architectures with full IO coherence, such as x86,
 * do not experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY, which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

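/*
 * MDIO access: each transaction programs the PA (PHY address), RDA (register
 * address), CR (CSR clock range divider, taken from config->config_mac_mdio)
 * and GOC (read/write command) fields of MAC_MDIO_Address, then sets the GB
 * (busy) bit and polls until the MAC clears it again. Data is transferred
 * through MAC_MDIO_Data.
 */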
static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	if (clk_valid(&eqos->clk_rx)) {
		ret = clk_enable(&eqos->clk_rx);
		if (ret < 0) {
			pr_err("clk_enable(clk_rx) failed: %d", ret);
			goto err_disable_clk_master_bus;
		}
	}

	if (clk_valid(&eqos->clk_tx)) {
		ret = clk_enable(&eqos->clk_tx);
		if (ret < 0) {
			pr_err("clk_enable(clk_tx) failed: %d", ret);
			goto err_disable_clk_rx;
		}
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	if (clk_valid(&eqos->clk_tx))
		clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	if (clk_valid(&eqos->clk_rx))
		clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	if (clk_valid(&eqos->clk_tx))
		clk_disable(&eqos->clk_tx);
	if (clk_valid(&eqos->clk_rx))
		clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}

		udelay(eqos->reset_delays[0]);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(eqos->reset_delays[1]);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}

		udelay(eqos->reset_delays[2]);
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx_eqos_txclk_set_rate(%lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
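	/*
	 * Worked example (illustrative, not from the original source): for
	 * 00:11:22:33:44:55, address0_high = 0x5544 and
	 * address0_low = 0x33221100.
	 */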
	val = (plat->enetaddr[5] << 8) |
		(plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
		(plat->enetaddr[2] << 16) |
		(plat->enetaddr[1] << 8) |
		(plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

int eqos_init(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, limit = 10;
	ulong rate;
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->config->ops->eqos_start_clks) {
		ret = eqos->config->ops->eqos_start_clks(dev);
		if (ret < 0) {
			pr_err("eqos_start_clks() failed: %d", ret);
			goto err;
		}
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	/* DMA SW reset */
	val = readl(&eqos->dma_regs->mode);
	val |= EQOS_DMA_MODE_SWR;
	writel(val, &eqos->dma_regs->mode);
	while (limit--) {
		if (!(readl(&eqos->dma_regs->mode) & EQOS_DMA_MODE_SWR))
			break;
		mdelay(10);
	}

	if (limit < 0) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		ret = -EAGAIN;
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}

	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

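	/*
	 * The US_TIC counter holds the number of CSR clock cycles per 1 us,
	 * minus one (an assumption based on the register's name and this
	 * computation, not stated in the original source).
	 */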
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->ops->eqos_get_interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	if (eqos->config->ops->eqos_stop_clks)
		eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

void eqos_enable(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	int i;

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	/* Configure MTL */
	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
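	/*
	 * Worked example (illustrative): a 16 KiB FIFO is reported as
	 * fifo_sz = 7 (128 << 7 == 16384), giving tqs/rqs = 16384/256 - 1
	 * = 63.
	 */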

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control: space for min 2
		 * frames, i.e. (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control: space for min
		 * 1 frame (frame size 1500 bytes) in the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO
			 * size limits; overflow may therefore occur in spite
			 * of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);
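	/*
	 * Note (assumed from the Synopsys EQOS register map, not stated in
	 * the original source): the two raw writes above touch MAC_RxQ_Ctrl1
	 * (offset 0x0a4, MCBCQEN bit 20) and MAC_Packet_Filter (offset 0x008,
	 * PR bit 0), for which struct eqos_mac_regs has no named fields.
	 */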

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
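	/*
	 * Worked example (illustrative): with a 16 KiB TX FIFO, tqs = 63, so
	 * pbl is clamped to 32, the largest value the driver programs.
	 */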
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
		EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
		EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
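	/*
	 * Hand each RX descriptor its buffer address and give it to the HW by
	 * setting OWN; BUF1V marks the buffer-1 address as valid.
	 */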
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						(i * EQOS_MAX_PACKET_SIZE),
						EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;
}

static int eqos_start(struct udevice *dev)
{
	int ret;

	ret = eqos_init(dev);
	if (ret)
		return ret;

	eqos_enable(dev);

	return 0;
}

void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of the MTL */
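	/*
	 * Per the Synopsys EQOS databook (an assumption, not documented in
	 * the original source): TRCSTS == 1 means the TX queue read
	 * controller is still fetching data, and TXQSTS != 0 means the TX
	 * queue is not yet empty.
	 */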
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy) {
		phy_shutdown(eqos->phy);
	}
	eqos->config->ops->eqos_stop_resets(dev);
	if (eqos->config->ops->eqos_stop_clks)
		eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);
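	/*
	 * Advancing the tail pointer past the new descriptor tells the DMA
	 * engine to fetch and transmit it.
	 */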

	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
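	/*
	 * In the RDES3 write-back format (per the Synopsys EQOS descriptor
	 * layout), bits 14:0 carry the received packet length.
	 */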
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}
1761 
1762 	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
1763 	if (ret) {
1764 		pr_err("clk_get_by_name(tx) failed: %d", ret);
1765 		goto err_free_clk_ptp_ref;
1766 	}
1767 
1768 	debug("%s: OK\n", __func__);
1769 	return 0;
1770 
1771 err_free_clk_ptp_ref:
1772 	clk_free(&eqos->clk_ptp_ref);
1773 err_free_clk_rx:
1774 	clk_free(&eqos->clk_rx);
1775 err_free_clk_master_bus:
1776 	clk_free(&eqos->clk_master_bus);
1777 err_free_clk_slave_bus:
1778 	clk_free(&eqos->clk_slave_bus);
1779 err_free_gpio_phy_reset:
1780 	dm_gpio_free(dev, &eqos->phy_reset_gpio);
1781 err_free_reset_eqos:
1782 	reset_free(&eqos->reset_ctl);
1783 
1784 	debug("%s: returns %d\n", __func__, ret);
1785 	return ret;
1786 }
1787 
1788 /* board-specific Ethernet Interface initializations. */
1789 __weak int board_interface_eth_init(struct udevice *dev,
1790 				    phy_interface_t interface_type)
1791 {
1792 	return 0;
1793 }
1794 
1795 static int eqos_probe_resources_stm32(struct udevice *dev)
1796 {
1797 	struct eqos_priv *eqos = dev_get_priv(dev);
1798 	int ret;
1799 	phy_interface_t interface;
1800 	struct ofnode_phandle_args phandle_args;
1801 
1802 	debug("%s(dev=%p):\n", __func__, dev);
1803 
1804 	interface = eqos->config->ops->eqos_get_interface(dev);
1805 
1806 	if (interface == PHY_INTERFACE_MODE_NONE) {
1807 		pr_err("Invalid PHY interface\n");
1808 		return -EINVAL;
1809 	}
1810 
1811 	ret = board_interface_eth_init(dev, interface);
1812 	if (ret)
1813 		return -EINVAL;
1814 
1815 	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);
1816 
1817 	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
1818 	if (ret) {
1819 		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
1820 		return ret;
1821 	}
1822 
1823 	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
1824 	if (ret)
1825 		pr_warn("clk_get_by_name(rx) failed: %d", ret);
1826 
1827 	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
1828 	if (ret)
1829 		pr_warn("clk_get_by_name(mac-clk-tx) failed: %d", ret);
1830 
1831 	/* Get ETH_CLK clock (optional) */
1832 	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
1833 	if (ret)
1834 		pr_warn("No PHY clock provided: %d", ret);
1835 
1836 	eqos->phyaddr = -1;
1837 	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
1838 					 &phandle_args);
1839 	if (!ret) {
1840 		/* search "reset-gpios" in phy node */
1841 		ret = gpio_request_by_name_nodev(phandle_args.node,
1842 						 "reset-gpios", 0,
1843 						 &eqos->phy_reset_gpio,
1844 						 GPIOD_IS_OUT |
1845 						 GPIOD_IS_OUT_ACTIVE);
1846 		if (ret)
1847 			pr_warn("phy node reset-gpios not provided: %d", ret);
1849 		else
1850 			eqos->reset_delays[1] = 2;
1851 
1852 		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
1853 							"reg", -1);
1854 	}
1855 
1856 	if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
1857 		int reset_flags = GPIOD_IS_OUT;
1858 
1859 		if (dev_read_bool(dev, "snps,reset-active-low"))
1860 			reset_flags |= GPIOD_ACTIVE_LOW;
1861 
1862 		ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
1863 					   &eqos->phy_reset_gpio, reset_flags);
1864 		if (ret == 0)
1865 			ret = dev_read_u32_array(dev, "snps,reset-delays-us",
1866 						 eqos->reset_delays, 3);
1867 		else
1868 			pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d",
1869 				ret);
1870 	}
1871 
1872 	debug("%s: OK\n", __func__);
1873 	return 0;
1874 }
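/*
 * Illustrative device tree fragment for the bindings parsed above; node and
 * phandle names are assumed, not mandated by this driver. The PHY reset GPIO
 * is taken from "reset-gpios" in the node referenced by "phy-handle" when
 * present, otherwise from the legacy "snps,reset-gpio" property together
 * with the three "snps,reset-delays-us" values:
 *
 *	&ethernet {
 *		compatible = "snps,dwmac-4.20a";
 *		clock-names = "stmmaceth", "mac-clk-rx", "mac-clk-tx", "eth-ck";
 *		phy-mode = "rgmii";
 *		max-speed = <1000>;
 *		phy-handle = <&phy0>;
 *		snps,reset-gpio = <&gpiog 10 GPIO_ACTIVE_LOW>;
 *		snps,reset-active-low;
 *		snps,reset-delays-us = <0 10000 50000>;
 *	};
 *
 * where &phy0 is an ethernet-phy node whose "reg" property supplies the MDIO
 * address read into eqos->phyaddr.
 */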
1875 
1876 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
1877 {
1878 	const char *phy_mode;
1879 	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1880 
1881 	debug("%s(dev=%p):\n", __func__, dev);
1882 
1883 	phy_mode = dev_read_string(dev, "phy-mode");
1884 	if (phy_mode)
1885 		interface = phy_get_interface_by_name(phy_mode);
1886 
1887 	return interface;
1888 }
1889 
1890 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
1891 {
1892 	return PHY_INTERFACE_MODE_MII;
1893 }
1894 
1895 static int eqos_probe_resources_imx(struct udevice *dev)
1896 {
1897 	struct eqos_priv *eqos = dev_get_priv(dev);
1898 	phy_interface_t interface;
1899 
1900 	debug("%s(dev=%p):\n", __func__, dev);
1901 
1902 	interface = eqos->config->ops->eqos_get_interface(dev);
1903 
1904 	if (interface == PHY_INTERFACE_MODE_NONE) {
1905 		pr_err("Invalid PHY interface\n");
1906 		return -EINVAL;
1907 	}
1908 
1909 	debug("%s: OK\n", __func__);
1910 	return 0;
1911 }
1912 
1913 static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
1914 {
1915 	const char *phy_mode;
1916 	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1917 
1918 	debug("%s(dev=%p):\n", __func__, dev);
1919 
1920 	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
1921 			       NULL);
1922 	if (phy_mode)
1923 		interface = phy_get_interface_by_name(phy_mode);
1924 
1925 	return interface;
1926 }
1927 
1928 static int eqos_remove_resources_tegra186(struct udevice *dev)
1929 {
1930 	struct eqos_priv *eqos = dev_get_priv(dev);
1931 
1932 	debug("%s(dev=%p):\n", __func__, dev);
1933 
1934 #ifdef CONFIG_CLK
1935 	clk_free(&eqos->clk_tx);
1936 	clk_free(&eqos->clk_ptp_ref);
1937 	clk_free(&eqos->clk_rx);
1938 	clk_free(&eqos->clk_slave_bus);
1939 	clk_free(&eqos->clk_master_bus);
1940 #endif
1941 	dm_gpio_free(dev, &eqos->phy_reset_gpio);
1942 	reset_free(&eqos->reset_ctl);
1943 
1944 	debug("%s: OK\n", __func__);
1945 	return 0;
1946 }
1947 
1948 static int eqos_remove_resources_stm32(struct udevice *dev)
1949 {
1950 	struct eqos_priv *eqos = dev_get_priv(dev);
1951 
1952 	debug("%s(dev=%p):\n", __func__, dev);
1953 
1954 #ifdef CONFIG_CLK
1955 	if (clk_valid(&eqos->clk_tx))
1956 		clk_free(&eqos->clk_tx);
1957 	if (clk_valid(&eqos->clk_rx))
1958 		clk_free(&eqos->clk_rx);
1959 	clk_free(&eqos->clk_master_bus);
1960 	if (clk_valid(&eqos->clk_ck))
1961 		clk_free(&eqos->clk_ck);
1962 #endif
1963 
1964 	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
1965 		dm_gpio_free(dev, &eqos->phy_reset_gpio);
1966 
1967 	debug("%s: OK\n", __func__);
1968 	return 0;
1969 }
1970 
1971 static int eqos_remove_resources_imx(struct udevice *dev)
1972 {
1973 	return 0;
1974 }
1975 
1976 int eqos_probe(struct udevice *dev)
1977 {
1978 	struct eqos_priv *eqos = dev_get_priv(dev);
1979 	int ret;
1980 
1981 	debug("%s(dev=%p):\n", __func__, dev);
1982 
1983 	eqos->dev = dev;
1984 	eqos->config = (void *)dev_get_driver_data(dev);
1985 
1986 	eqos->regs = dev_read_addr(dev);
1987 	if (eqos->regs == FDT_ADDR_T_NONE) {
1988 		pr_err("dev_read_addr() failed");
1989 		return -ENODEV;
1990 	}
1991 	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
1992 	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
1993 	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
1994 	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);
1995 
1996 	ret = eqos_probe_resources_core(dev);
1997 	if (ret < 0) {
1998 		pr_err("eqos_probe_resources_core() failed: %d", ret);
1999 		return ret;
2000 	}
2001 
2002 	ret = eqos->config->ops->eqos_probe_resources(dev);
2003 	if (ret < 0) {
2004 		pr_err("eqos_probe_resources() failed: %d", ret);
2005 		goto err_remove_resources_core;
2006 	}
2007 
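	/* Reuse an MDIO bus already registered for this device, if any. */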
2008 #ifdef CONFIG_DM_ETH_PHY
2009 	eqos->mii = eth_phy_get_mdio_bus(dev);
2010 #endif
2011 	if (!eqos->mii) {
2012 		eqos->mii = mdio_alloc();
2013 		if (!eqos->mii) {
2014 			pr_err("mdio_alloc() failed");
2015 			ret = -ENOMEM;
2016 			goto err_remove_resources_tegra;
2017 		}
2018 		eqos->mii->read = eqos_mdio_read;
2019 		eqos->mii->write = eqos_mdio_write;
2020 		eqos->mii->priv = eqos;
2021 		strcpy(eqos->mii->name, dev->name);
2022 
2023 		ret = mdio_register(eqos->mii);
2024 		if (ret < 0) {
2025 			pr_err("mdio_register() failed: %d", ret);
2026 			goto err_free_mdio;
2027 		}
2028 	}
2029 
2030 #ifdef CONFIG_DM_ETH_PHY
2031 	eth_phy_set_mdio_bus(dev, eqos->mii);
2032 #endif
2033 
2034 	debug("%s: OK\n", __func__);
2035 	return 0;
2036 
2037 err_free_mdio:
2038 	mdio_free(eqos->mii);
2039 err_remove_resources_tegra:
2040 	eqos->config->ops->eqos_remove_resources(dev);
2041 err_remove_resources_core:
2042 	eqos_remove_resources_core(dev);
2043 
2044 	debug("%s: returns %d\n", __func__, ret);
2045 	return ret;
2046 }
2047 
2048 static int eqos_remove(struct udevice *dev)
2049 {
2050 	struct eqos_priv *eqos = dev_get_priv(dev);
2051 
2052 	debug("%s(dev=%p):\n", __func__, dev);
2053 
2054 	mdio_unregister(eqos->mii);
2055 	mdio_free(eqos->mii);
2056 	eqos->config->ops->eqos_remove_resources(dev);
2057 
2058 	eqos_remove_resources_core(dev);
2059 
2060 	debug("%s: OK\n", __func__);
2061 	return 0;
2062 }
2063 
2064 static const struct eth_ops eqos_ops = {
2065 	.start = eqos_start,
2066 	.stop = eqos_stop,
2067 	.send = eqos_send,
2068 	.recv = eqos_recv,
2069 	.free_pkt = eqos_free_pkt,
2070 	.write_hwaddr = eqos_write_hwaddr,
2071 	.read_rom_hwaddr = eqos_read_rom_hwaddr,
2072 };
2073 
2074 static struct eqos_ops eqos_tegra186_ops = {
2075 	.eqos_inval_desc = eqos_inval_desc_tegra186,
2076 	.eqos_flush_desc = eqos_flush_desc_tegra186,
2077 	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
2078 	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
2079 	.eqos_probe_resources = eqos_probe_resources_tegra186,
2080 	.eqos_remove_resources = eqos_remove_resources_tegra186,
2081 	.eqos_stop_resets = eqos_stop_resets_tegra186,
2082 	.eqos_start_resets = eqos_start_resets_tegra186,
2083 	.eqos_stop_clks = eqos_stop_clks_tegra186,
2084 	.eqos_start_clks = eqos_start_clks_tegra186,
2085 	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
2086 	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
2087 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
2088 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186,
2089 	.eqos_get_interface = eqos_get_interface_tegra186
2090 };
2091 
2092 static const struct eqos_config eqos_tegra186_config = {
2093 	.reg_access_always_ok = false,
2094 	.mdio_wait = 10,
2095 	.swr_wait = 10,
2096 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
2097 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
2098 	.ops = &eqos_tegra186_ops
2099 };
2100 
2101 static struct eqos_ops eqos_stm32_ops = {
2102 	.eqos_inval_desc = eqos_inval_desc_generic,
2103 	.eqos_flush_desc = eqos_flush_desc_generic,
2104 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2105 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2106 	.eqos_probe_resources = eqos_probe_resources_stm32,
2107 	.eqos_remove_resources = eqos_remove_resources_stm32,
2108 	.eqos_stop_resets = eqos_stop_resets_stm32,
2109 	.eqos_start_resets = eqos_start_resets_stm32,
2110 	.eqos_stop_clks = eqos_stop_clks_stm32,
2111 	.eqos_start_clks = eqos_start_clks_stm32,
2112 	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
2113 	.eqos_disable_calibration = eqos_disable_calibration_stm32,
2114 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
2115 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
2116 	.eqos_get_interface = eqos_get_interface_stm32
2117 };
2118 
2119 static const struct eqos_config eqos_stm32_config = {
2120 	.reg_access_always_ok = false,
2121 	.mdio_wait = 10000,
2122 	.swr_wait = 50,
2123 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
2124 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
2125 	.ops = &eqos_stm32_ops
2126 };
2127 
2128 static struct eqos_ops eqos_imx_ops = {
2129 	.eqos_inval_desc = eqos_inval_desc_generic,
2130 	.eqos_flush_desc = eqos_flush_desc_generic,
2131 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2132 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2133 	.eqos_probe_resources = eqos_probe_resources_imx,
2134 	.eqos_remove_resources = eqos_remove_resources_imx,
2135 	.eqos_stop_resets = eqos_stop_resets_imx,
2136 	.eqos_start_resets = eqos_start_resets_imx,
2137 	.eqos_stop_clks = eqos_stop_clks_imx,
2138 	.eqos_start_clks = eqos_start_clks_imx,
2139 	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
2140 	.eqos_disable_calibration = eqos_disable_calibration_imx,
2141 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
2142 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx,
2143 	.eqos_get_interface = eqos_get_interface_imx
2144 };
2145 
2146 struct eqos_config eqos_imx_config = {
2147 	.reg_access_always_ok = false,
2148 	.mdio_wait = 10000,
2149 	.swr_wait = 50,
2150 	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
2151 	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
2152 	.ops = &eqos_imx_ops
2153 };
2154 
2155 struct eqos_ops eqos_rockchip_ops = {
2156 	.eqos_inval_desc = eqos_inval_desc_generic,
2157 	.eqos_flush_desc = eqos_flush_desc_generic,
2158 	.eqos_inval_buffer = eqos_inval_buffer_generic,
2159 	.eqos_flush_buffer = eqos_flush_buffer_generic,
2160 	.eqos_probe_resources = eqos_probe_resources_stm32,
2161 	.eqos_remove_resources = eqos_remove_resources_stm32,
2162 	.eqos_stop_resets = eqos_stop_resets_stm32,
2163 	.eqos_start_resets = eqos_start_resets_stm32,
2164 	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
2165 	.eqos_disable_calibration = eqos_disable_calibration_stm32,
2166 	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
2167 	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
2168 	.eqos_get_interface = eqos_get_interface_stm32
2169 };
2170 
2171 static const struct udevice_id eqos_ids[] = {
2172 	{
2173 		.compatible = "nvidia,tegra186-eqos",
2174 		.data = (ulong)&eqos_tegra186_config
2175 	},
2176 	{
2177 		.compatible = "snps,dwmac-4.20a",
2178 		.data = (ulong)&eqos_stm32_config
2179 	},
2180 	{
2181 		.compatible = "fsl,imx-eqos",
2182 		.data = (ulong)&eqos_imx_config
2183 	},
2185 	{ }
2186 };
2187 
2188 U_BOOT_DRIVER(eth_eqos) = {
2189 	.name = "eth_eqos",
2190 	.id = UCLASS_ETH,
2191 	.of_match = of_match_ptr(eqos_ids),
2192 	.probe = eqos_probe,
2193 	.remove = eqos_remove,
2194 	.ops = &eqos_ops,
2195 	.priv_auto_alloc_size = sizeof(struct eqos_priv),
2196 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
2197 };
2198