// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);
}
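
/*
 * Illustration only (not additional driver logic): with the link bit
 * definitions filled in by dwmac4_setup() at the bottom of this file
 * (speed10 = PS, speed100 = FES | PS, speed1000 = 0, speed_mask = FES | PS),
 * a fixed 100 Mbit/s power-save configuration above roughly resolves to
 * setting both GMAC_CONFIG_FES and GMAC_CONFIG_PS in GMAC_CONFIG before the
 * final writel(), e.g.:
 *
 *	value &= hw->link.speed_mask;
 *	value |= GMAC_CONFIG_FES | GMAC_CONFIG_PS;
 */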

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		 GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}
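
/*
 * Layout sketch, derived from the mask/shift macros above (the exact field
 * semantics are assumed from the usual GMAC4 register layout): MAC_RxQ_Ctrl2
 * holds an 8-bit priority bitmap per queue for queues 0-3 and MAC_RxQ_Ctrl3
 * the same for queues 4-7. Steering VLAN priorities 2 and 3 to RX queue 5
 * would then look roughly like:
 *
 *	dwmac4_rx_queue_priority(hw, BIT(2) | BIT(3), 5);
 */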

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else if (queue > 4) {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}
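
/*
 * Mapping note: RX queues 0-3 live in MTL_RXQ_DMA_MAP0 and queues 4-7 in
 * MTL_RXQ_DMA_MAP1, with queue 0 and queue 4 sharing the same field position
 * in their respective registers (hence the common Q04MDMACH handling above).
 * A hypothetical static 1:1 queue-to-channel setup would be:
 *
 *	for (queue = 0; queue < rx_queues_to_use; queue++)
 *		dwmac4_map_mtl_dma(hw, queue, queue);
 */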

static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}
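
/*
 * CBS note: the idle slope is written through dwmac4_set_mtl_tx_queue_weight()
 * because the shaper shares the MTL TX queue weight register (see the comment
 * in the body above). A sketch of enabling the shaper on TX queue 1, with the
 * slope and credit values assumed to come from a tc-cbs style offload request:
 *
 *	dwmac4_config_cbs(hw, send_slope, idle_slope,
 *			  high_credit, low_credit, 1);
 */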

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}
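
/*
 * Wake-on-LAN sketch: GMAC_PMT is written with 0 to leave power-down mode, or
 * with power_down plus the selected wake sources to arm it; the receiver is
 * force-enabled first so a wake packet can be seen while the rest of the MAC
 * is quiesced. A simplified suspend/resume pairing (illustrative only):
 *
 *	dwmac4_pmt(hw, WAKE_MAGIC);	(suspend: arm magic-packet wake)
 *	dwmac4_pmt(hw, 0);		(resume: clear PMT again)
 */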

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
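
/*
 * Encoding example, following directly from the shift/mask above: for a link
 * status timer of 1000 ms and a wake time of 30 us,
 *
 *	dwmac4_set_eee_timer(hw, 1000, 30);
 *
 * writes (1000 << 16) | 30 = 0x03e8001e to GMAC4_LPI_TIMER_CTRL.
 */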

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}
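
/*
 * Access pattern note: the extended VLAN filter table is not mapped directly.
 * The entry value is staged in GMAC_VLAN_TAG_DATA, then the entry offset plus
 * the OB (operation busy) bit are written to GMAC_VLAN_TAG and OB is polled
 * until the hardware commits the entry, bounded above at roughly 10 us before
 * giving up with -EBUSY.
 */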

static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	if (hw->promisc) {
		netdev_err(dev,
			   "Adding VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}
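
/*
 * Filtering model: with a single hardware VID field (hw->num_vlan == 1) only
 * one VLAN ID can be programmed and VID 0 is rejected because it would mean
 * "match any". With the extended table, every enabled entry carries
 * GMAC_VLAN_TAG_DATA_VEN plus the VID and the first free slot is reused.
 * Typical call sketch (normally reached through the ndo_vlan_rx_add_vid path):
 *
 *	ret = dwmac4_add_hw_vlan_rx_fltr(dev, hw, htons(ETH_P_8021Q), vid);
 */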

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	if (hw->promisc) {
		netdev_err(dev,
			   "Deleting VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_vlan_promisc_enable(struct net_device *dev,
				       struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, 0);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		if (value & GMAC_VLAN_VTHM) {
			value &= ~GMAC_VLAN_VTHM;
			writel(value, ioaddr + GMAC_VLAN_TAG);
		}
	}
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				(hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);

	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
		if (!hw->promisc) {
			hw->promisc = 1;
			dwmac4_vlan_promisc_enable(dev, hw);
		}
	} else {
		if (hw->promisc) {
			hw->promisc = 0;
			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
		}
	}
}
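
/*
 * Filter precedence implemented above, highest first: promiscuous mode
 * (optionally redirecting VLAN-filter failures to a dedicated RX queue), then
 * "pass all multicast" when IFF_ALLMULTI is set or the multicast list
 * overflows the hash, then the multicast hash filter, and finally perfect
 * unicast filtering with a fallback to promiscuous mode when more unicast
 * addresses are requested than the hardware supports. The hash bit for an
 * address is derived exactly as in the loop above:
 *
 *	bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> (32 - mcbitslog2);
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
 */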

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if (intr_status & mmc_tx_irq)
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}
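
/*
 * Return value note: the result is a bitmask of CORE_IRQ_* flags (e.g.
 * CORE_IRQ_TX_PATH_IN_LPI_MODE / CORE_IRQ_TX_PATH_EXIT_LPI_MODE) so the
 * caller can fold MAC, MTL and PCS events into a single handler; the PMT and
 * LPI causes are cleared here simply by reading their status registers.
 */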

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6, the SA and DA filters cannot both be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}
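
/*
 * L3/L4 filtering note: both helpers set GMAC_PACKET_FILTER_IPFE and then
 * program the per-filter MAC_L3_L4_Control register; passing en == false
 * clears that control register again after the match value has been written.
 * For the L4 case, "sa" selects source-port matching (GMAC_L4SP0) versus
 * destination-port matching (shifted into GMAC_L4DP0), and "inv" turns the
 * comparison into an inverse match.
 */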

#ifdef CONFIG_STMMAC_FULL
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
#endif

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
#ifdef CONFIG_STMMAC_FULL
	.flex_pps_config = dwmac5_flex_pps_config,
#endif
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
#ifdef CONFIG_STMMAC_FULL
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
#endif
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

#ifdef CONFIG_STMMAC_FULL
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
#endif

static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}
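
/*
 * NRVF decoding used above (hardware feature register 3): 0 maps to a single
 * filter (the plain MAC_VLAN_Tag VID field), 1 to 4, 2 to 8, 3 to 16, 4 to 24
 * and 5 to 32 extended filter entries; any other value is treated
 * conservatively as a single filter.
 */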

int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}
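
/*
 * MDIO layout sketch, as encoded in the mii fields above: the PHY address
 * sits in GMAC_MDIO_ADDR bits 25:21, the register number in bits 20:16 and
 * the CSR clock range in bits 11:8. Building the address word for, say,
 * register 2 on PHY 1 (done by the generic stmmac MDIO code, shown here only
 * as an illustration) amounts to:
 *
 *	value |= (1 << mac->mii.addr_shift) & mac->mii.addr_mask;
 *	value |= (2 << mac->mii.reg_shift) & mac->mii.reg_mask;
 */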