Lines matching references to 'bp' (struct macb *) in the Cadence MACB/GEM Ethernet driver (drivers/net/ethernet/cadence/macb_main.c). Each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark lines where bp is declared as a parameter or local variable.
53 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
54 * (bp)->rx_ring_size)
59 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
60 * (bp)->tx_ring_size)
63 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) argument
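The macros at lines 53-63 size each descriptor ring (descriptor size times ring length) and place the TX wake threshold at three quarters of the ring, so the queue is woken once at least a quarter of it is free again. A minimal standalone sketch of the same arithmetic; the DESC_SIZE and ring-size values here are illustrative stand-ins, not the hardware's:

    #include <stdio.h>

    #define DESC_SIZE     16u   /* stand-in for macb_dma_desc_get_size(bp) */
    #define TX_RING_SIZE 512u   /* stand-in for bp->tx_ring_size */
    #define RX_RING_SIZE 512u   /* stand-in for bp->rx_ring_size */

    #define RX_RING_BYTES    (DESC_SIZE * RX_RING_SIZE)
    #define TX_RING_BYTES    (DESC_SIZE * TX_RING_SIZE)
    #define TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)

    int main(void)
    {
        printf("rx ring %u bytes, tx ring %u bytes\n",
               RX_RING_BYTES, TX_RING_BYTES);
        printf("wake tx queue at <= %u used descriptors\n",
               TX_WAKEUP_THRESH);
        return 0;
    }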
123 static unsigned int macb_dma_desc_get_size(struct macb *bp) in macb_dma_desc_get_size() argument
128 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
150 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) in macb_adj_dma_desc_idx() argument
153 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
169 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) in macb_64b_desc() argument
177 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) in macb_tx_ring_wrap() argument
179 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
185 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
186 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
193 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
200 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
201 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
206 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) in macb_rx_ring_wrap() argument
208 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
213 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
214 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
220 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
221 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
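macb_tx_ring_wrap() and macb_rx_ring_wrap() assume the ring sizes are powers of two, so "index & (size - 1)" is a cheap modulo on a free-running index; macb_rx_buffer() then scales the wrapped index by bp->rx_buffer_size to locate the packet's slot in one contiguous buffer area. A sketch with made-up sizes:

    #include <stdio.h>

    #define RING_SIZE 8u     /* must be a power of two for the mask trick */
    #define BUF_SIZE  128u   /* stand-in for bp->rx_buffer_size */

    static unsigned int ring_wrap(unsigned int index)
    {
        return index & (RING_SIZE - 1);   /* same as macb_*_ring_wrap() */
    }

    int main(void)
    {
        /* Indices keep counting up; the mask wraps them in place. */
        for (unsigned int i = 6; i < 11; i++)
            printf("index %2u -> slot %u, buffer offset %4u\n",
                   i, ring_wrap(i), ring_wrap(i) * BUF_SIZE);
        return 0;
    }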
225 static u32 hw_readl_native(struct macb *bp, int offset) in hw_readl_native() argument
227 return __raw_readl(bp->regs + offset); in hw_readl_native()
230 static void hw_writel_native(struct macb *bp, int offset, u32 value) in hw_writel_native() argument
232 __raw_writel(value, bp->regs + offset); in hw_writel_native()
235 static u32 hw_readl(struct macb *bp, int offset) in hw_readl() argument
237 return readl_relaxed(bp->regs + offset); in hw_readl()
240 static void hw_writel(struct macb *bp, int offset, u32 value) in hw_writel() argument
242 writel_relaxed(value, bp->regs + offset); in hw_writel()
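The driver carries two accessor pairs, hw_readl_native()/hw_writel_native() (raw, native-endian) and hw_readl()/hw_writel() (relaxed little-endian), and binds one pair through function pointers at probe time (see bp->macb_reg_readl/bp->macb_reg_writel around line 4538). A userspace sketch of that dispatch pattern; the register file and both accessors are stubs, so the real barrier and endianness differences are reduced to a comment:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[64];   /* stub for the MMIO window at bp->regs */

    /* In the kernel these differ in endianness/barrier semantics. */
    static uint32_t rd_native(int off)             { return regs[off / 4]; }
    static void     wr_native(int off, uint32_t v)  { regs[off / 4] = v; }
    static uint32_t rd_relaxed(int off)             { return regs[off / 4]; }
    static void     wr_relaxed(int off, uint32_t v) { regs[off / 4] = v; }

    struct dev {
        uint32_t (*reg_readl)(int off);
        void     (*reg_writel)(int off, uint32_t v);
    };

    int main(void)
    {
        int native_io = 1;   /* decided once at probe */
        struct dev d = {
            .reg_readl  = native_io ? rd_native : rd_relaxed,
            .reg_writel = native_io ? wr_native : wr_relaxed,
        };

        d.reg_writel(0x08, 0xdeadbeef);
        printf("0x%08x\n", d.reg_readl(0x08));
        return 0;
    }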
274 static void macb_set_hwaddr(struct macb *bp) in macb_set_hwaddr() argument
279 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
280 macb_or_gem_writel(bp, SA1B, bottom); in macb_set_hwaddr()
281 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
282 macb_or_gem_writel(bp, SA1T, top); in macb_set_hwaddr()
285 macb_or_gem_writel(bp, SA2B, 0); in macb_set_hwaddr()
286 macb_or_gem_writel(bp, SA2T, 0); in macb_set_hwaddr()
287 macb_or_gem_writel(bp, SA3B, 0); in macb_set_hwaddr()
288 macb_or_gem_writel(bp, SA3T, 0); in macb_set_hwaddr()
289 macb_or_gem_writel(bp, SA4B, 0); in macb_set_hwaddr()
290 macb_or_gem_writel(bp, SA4T, 0); in macb_set_hwaddr()
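macb_set_hwaddr() packs the 6-byte station address into a 32-bit bottom half (SA1B) and a 16-bit top half (SA1T), then clears the three remaining specific-address register pairs so stale filters cannot match. A sketch of the split, assuming a little-endian host (the driver uses cpu_to_le32()/cpu_to_le16() to make this portable):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const uint8_t mac[6] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };
        uint32_t bottom;
        uint16_t top;

        memcpy(&bottom, mac, 4);       /* first four bytes -> SA1B */
        memcpy(&top, mac + 4, 2);      /* last two bytes   -> SA1T */

        printf("SA1B = 0x%08x, SA1T = 0x%04x\n", bottom, top);
        return 0;
    }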
293 static void macb_get_hwaddr(struct macb *bp) in macb_get_hwaddr() argument
302 bottom = macb_or_gem_readl(bp, SA1B + i * 8); in macb_get_hwaddr()
303 top = macb_or_gem_readl(bp, SA1T + i * 8); in macb_get_hwaddr()
313 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
318 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
319 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
322 static int macb_mdio_wait_for_idle(struct macb *bp) in macb_mdio_wait_for_idle() argument
326 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), in macb_mdio_wait_for_idle()
332 struct macb *bp = bus->priv; in macb_mdio_read() local
335 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read()
337 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read()
341 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
346 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
353 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
357 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
363 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_read()
370 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
374 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); in macb_mdio_read()
377 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
378 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
386 struct macb *bp = bus->priv; in macb_mdio_write() local
389 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write()
391 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write()
395 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
400 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
407 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
411 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
418 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_write()
426 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
431 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
432 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
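Both MDIO paths share one shape: take a runtime-PM reference, poll NSR until the PHY-management logic is idle, emit a MAN frame (Clause 22 directly; Clause 45 as an address frame followed by a data frame), poll idle again, then release the PM reference via the autosuspend path. A sketch of just the idle-poll contract, with the register read stubbed; the kernel's readx_poll_timeout() uses a time budget and sleeps rather than counting iterations:

    #include <stdbool.h>
    #include <stdio.h>

    static int countdown = 3;          /* stub: becomes idle on 3rd poll */
    static bool man_logic_idle(void)
    {
        return --countdown <= 0;       /* stands in for NSR & IDLE */
    }

    static int mdio_wait_for_idle(int max_polls)
    {
        for (int i = 0; i < max_polls; i++)
            if (man_logic_idle())
                return 0;
        return -1;                     /* -ETIMEDOUT in the kernel */
    }

    int main(void)
    {
        if (mdio_wait_for_idle(10) == 0)
            printf("bus idle, safe to write the MAN frame\n");
        return 0;
    }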
437 static void macb_init_buffers(struct macb *bp) in macb_init_buffers() argument
442 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
445 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
451 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
508 struct macb *bp = netdev_priv(ndev); in macb_validate() local
521 if (!macb_is_gem(bp) && in macb_validate()
537 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
545 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_validate()
569 struct macb *bp = netdev_priv(ndev); in macb_mac_config() local
573 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
575 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_config()
577 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
580 } else if (macb_is_gem(bp)) { in macb_mac_config()
589 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_config()
591 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
598 struct macb *bp = netdev_priv(ndev); in macb_mac_link_down() local
603 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
604 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
606 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
609 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_down()
610 macb_writel(bp, NCR, ctrl); in macb_mac_link_down()
622 struct macb *bp = netdev_priv(ndev); in macb_mac_link_up() local
628 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
630 ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_link_up()
640 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
642 if (macb_is_gem(bp)) { in macb_mac_link_up()
652 macb_set_tx_clk(bp->tx_clk, speed, ndev); in macb_mac_link_up()
657 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
658 macb_init_buffers(bp); in macb_mac_link_up()
660 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
662 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
665 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_link_up()
667 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
670 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_up()
691 static int macb_phylink_connect(struct macb *bp) in macb_phylink_connect() argument
693 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
694 struct net_device *dev = bp->dev; in macb_phylink_connect()
699 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
702 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
709 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
717 phylink_start(bp->phylink); in macb_phylink_connect()
725 struct macb *bp = netdev_priv(dev); in macb_mii_probe() local
727 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
728 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
730 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
731 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
732 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
734 PTR_ERR(bp->phylink)); in macb_mii_probe()
735 return PTR_ERR(bp->phylink); in macb_mii_probe()
741 static int macb_mdiobus_register(struct macb *bp) in macb_mdiobus_register() argument
743 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
746 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
760 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
763 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
766 static int macb_mii_init(struct macb *bp) in macb_mii_init() argument
771 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_mii_init()
773 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
774 if (!bp->mii_bus) { in macb_mii_init()
779 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
780 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
781 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
782 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
783 bp->pdev->name, bp->pdev->id); in macb_mii_init()
784 bp->mii_bus->priv = bp; in macb_mii_init()
785 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
787 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
789 err = macb_mdiobus_register(bp); in macb_mii_init()
793 err = macb_mii_probe(bp->dev); in macb_mii_init()
800 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
802 mdiobus_free(bp->mii_bus); in macb_mii_init()
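macb_mii_init() follows the usual MDIO bus lifecycle: allocate, fill in name/read/write/id/priv/parent, register, probe the PHY, and on failure unwind in reverse (unregister, then free). A sketch of that unwind shape with stubbed bus functions; the trailing underscores mark them as stand-ins, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct mii_bus { const char *name; };

    static struct mii_bus *mdiobus_alloc_(void) { return malloc(sizeof(struct mii_bus)); }
    static int  mdiobus_register_(struct mii_bus *b) { (void)b; return 0; }
    static void mdiobus_unregister_(struct mii_bus *b) { (void)b; }
    static void mdiobus_free_(struct mii_bus *b) { free(b); }
    static int  mii_probe_(void) { return -1; }   /* force the error path */

    int main(void)
    {
        struct mii_bus *bus = mdiobus_alloc_();
        int err = bus ? 0 : -12;               /* -ENOMEM */

        if (err)
            goto out;
        bus->name = "MACB_mii_bus";
        err = mdiobus_register_(bus);
        if (err)
            goto err_free;
        err = mii_probe_();
        if (err)
            goto err_unregister;               /* reverse order of setup */
        return 0;

    err_unregister:
        mdiobus_unregister_(bus);
    err_free:
        mdiobus_free_(bus);
    out:
        printf("mii init failed: %d\n", err);
        return 1;
    }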
807 static void macb_update_stats(struct macb *bp) in macb_update_stats() argument
809 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
810 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
816 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
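macb_update_stats() treats hw_stats.macb as a dense run of u32 counters: a pointer walks from the first field (rx_pause_frames) to one past the last (tx_pause_frames), adding the corresponding hardware register value to each. A sketch over a plain struct; the four counters here are a hypothetical subset of the real layout:

    #include <stdint.h>
    #include <stdio.h>

    struct hw_stats {                  /* contiguous u32 counters */
        uint32_t rx_pause_frames;      /* first field, as in the driver */
        uint32_t rx_frames;
        uint32_t tx_frames;
        uint32_t tx_pause_frames;      /* last field */
    };

    static uint32_t read_counter_reg(unsigned int idx)
    {
        return idx + 1;                /* stub for one register per field */
    }

    int main(void)
    {
        struct hw_stats stats = { 0 };
        uint32_t *p   = &stats.rx_pause_frames;
        uint32_t *end = &stats.tx_pause_frames + 1;   /* one past last */

        for (unsigned int i = 0; p < end; p++, i++)
            *p += read_counter_reg(i);                /* accumulate */

        printf("tx_pause_frames = %u\n", stats.tx_pause_frames);
        return 0;
    }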
819 static int macb_halt_tx(struct macb *bp) in macb_halt_tx() argument
824 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); in macb_halt_tx()
829 status = macb_readl(bp, TSR); in macb_halt_tx()
839 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) in macb_tx_unmap() argument
843 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
846 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
857 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) in macb_set_addr() argument
862 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
863 desc_64 = macb_64b_desc(bp, desc); in macb_set_addr()
875 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) in macb_get_addr() argument
881 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
882 desc_64 = macb_64b_desc(bp, desc); in macb_get_addr()
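When HW_DMA_CAP_64B is set the descriptor gains extension words, and macb_set_addr()/macb_get_addr() split a dma_addr_t across the base descriptor's addr field (low 32 bits) and the extension's high word. A standalone sketch of the split and rejoin; the struct layouts are simplified stand-ins for macb_dma_desc/macb_dma_desc_64:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc    { uint32_t addr;  uint32_t ctrl;  };  /* base */
    struct desc_64 { uint32_t addrh; uint32_t resvd; };  /* 64-bit extension */

    int main(void)
    {
        uint64_t dma = 0x1234567890ull;
        struct desc d = { 0 };
        struct desc_64 d64 = { 0 };

        d.addr    = (uint32_t)dma;           /* low word, always written */
        d64.addrh = (uint32_t)(dma >> 32);   /* high word, 64-bit mode only */

        uint64_t back = ((uint64_t)d64.addrh << 32) | d.addr;
        printf("0x%" PRIx64 "\n", back);
        return back == dma ? 0 : 1;
    }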
894 struct macb *bp = queue->bp; in macb_tx_error_task() local
901 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
902 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
911 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
914 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
920 if (macb_halt_tx(bp)) in macb_tx_error_task()
922 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
938 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
948 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
949 macb_tx_ring_wrap(bp, tail), in macb_tx_error_task()
951 bp->dev->stats.tx_packets++; in macb_tx_error_task()
953 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
962 netdev_err(bp->dev, in macb_tx_error_task()
968 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
973 macb_set_addr(bp, desc, 0); in macb_tx_error_task()
982 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
990 macb_writel(bp, TSR, macb_readl(bp, TSR)); in macb_tx_error_task()
994 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
995 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_error_task()
997 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1005 struct macb *bp = queue->bp; in macb_tx_interrupt() local
1006 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1008 status = macb_readl(bp, TSR); in macb_tx_interrupt()
1009 macb_writel(bp, TSR, status); in macb_tx_interrupt()
1011 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
1014 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
1052 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
1053 macb_tx_ring_wrap(bp, tail), in macb_tx_interrupt()
1055 bp->dev->stats.tx_packets++; in macb_tx_interrupt()
1057 bp->dev->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1062 macb_tx_unmap(bp, tx_skb); in macb_tx_interrupt()
1074 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
1076 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_interrupt()
1077 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
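The wake-up test at lines 1074-1077 is the classic circular-buffer count: with a power-of-two ring, CIRC_CNT(head, tail, size) is (head - tail) masked by size - 1, and the subqueue wakes once the in-flight count drops to MACB_TX_WAKEUP_THRESH. The occupancy math in isolation:

    #include <stdio.h>

    #define RING_SIZE 16u   /* power of two */
    /* Same definition as include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
    #define WAKEUP_THRESH (3 * RING_SIZE / 4)

    int main(void)
    {
        unsigned int head = 21, tail = 9;   /* free-running indices */
        unsigned int used = CIRC_CNT(head, tail, RING_SIZE);

        printf("%u descriptors in flight\n", used);
        if (used <= WAKEUP_THRESH)
            printf("would wake the tx subqueue\n");
        return 0;
    }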
1085 struct macb *bp = queue->bp; in gem_rx_refill() local
1089 bp->rx_ring_size) > 0) { in gem_rx_refill()
1090 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1099 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1101 netdev_err(bp->dev, in gem_rx_refill()
1107 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1108 bp->rx_buffer_size, in gem_rx_refill()
1110 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1117 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1124 macb_set_addr(bp, desc, paddr); in gem_rx_refill()
1139 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1167 struct macb *bp = queue->bp; in gem_rx() local
1179 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1186 addr = macb_get_addr(bp, desc); in gem_rx()
1200 netdev_err(bp->dev, in gem_rx()
1202 bp->dev->stats.rx_dropped++; in gem_rx()
1208 netdev_err(bp->dev, in gem_rx()
1210 bp->dev->stats.rx_dropped++; in gem_rx()
1216 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1218 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1221 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1222 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1224 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1226 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1227 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1231 bp->dev->stats.rx_packets++; in gem_rx()
1233 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1236 gem_ptp_do_rxstamp(bp, skb, desc); in gem_rx()
1239 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1263 struct macb *bp = queue->bp; in macb_rx_frame() local
1266 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1268 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1269 macb_rx_ring_wrap(bp, first_frag), in macb_rx_frame()
1270 macb_rx_ring_wrap(bp, last_frag), len); in macb_rx_frame()
1280 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1282 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1302 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1314 offset += bp->rx_buffer_size; in macb_rx_frame()
1326 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1328 bp->dev->stats.rx_packets++; in macb_rx_frame()
1329 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1330 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1339 struct macb *bp = queue->bp; in macb_init_rx_ring() local
1345 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1347 macb_set_addr(bp, desc, addr); in macb_init_rx_ring()
1349 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1358 struct macb *bp = queue->bp; in macb_rx() local
1410 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1412 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1414 ctrl = macb_readl(bp, NCR); in macb_rx()
1415 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_rx()
1420 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_rx()
1422 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1437 struct macb *bp = queue->bp; in macb_poll() local
1441 status = macb_readl(bp, RSR); in macb_poll()
1442 macb_writel(bp, RSR, status); in macb_poll()
1444 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1447 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_poll()
1459 status = macb_readl(bp, RSR); in macb_poll()
1461 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1465 queue_writel(queue, IER, bp->rx_intr_mask); in macb_poll()
1475 status = macb_readl(bp, RSR); in macb_poll()
1477 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_poll()
1478 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1492 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); in macb_hresp_error_task() local
1493 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1498 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1499 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1503 ctrl = macb_readl(bp, NCR); in macb_hresp_error_task()
1505 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1510 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1513 macb_init_buffers(bp); in macb_hresp_error_task()
1516 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1518 bp->rx_intr_mask | in macb_hresp_error_task()
1523 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1533 struct macb *bp = queue->bp; in macb_tx_restart() local
1536 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_restart()
1542 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); in macb_tx_restart()
1543 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); in macb_tx_restart()
1544 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); in macb_tx_restart()
1549 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_restart()
1555 struct macb *bp = queue->bp; in macb_wol_interrupt() local
1563 spin_lock(&bp->lock); in macb_wol_interrupt()
1567 macb_writel(bp, WOL, 0); in macb_wol_interrupt()
1568 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1569 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1571 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1573 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1576 spin_unlock(&bp->lock); in macb_wol_interrupt()
1584 struct macb *bp = queue->bp; in gem_wol_interrupt() local
1592 spin_lock(&bp->lock); in gem_wol_interrupt()
1596 gem_writel(bp, WOL, 0); in gem_wol_interrupt()
1597 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1598 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1600 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1602 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1605 spin_unlock(&bp->lock); in gem_wol_interrupt()
1613 struct macb *bp = queue->bp; in macb_interrupt() local
1614 struct net_device *dev = bp->dev; in macb_interrupt()
1622 spin_lock(&bp->lock); in macb_interrupt()
1628 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1633 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1634 (unsigned int)(queue - bp->queues), in macb_interrupt()
1637 if (status & bp->rx_intr_mask) { in macb_interrupt()
1644 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1645 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1649 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1658 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1682 ctrl = macb_readl(bp, NCR); in macb_interrupt()
1683 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_interrupt()
1685 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_interrupt()
1687 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1693 if (macb_is_gem(bp)) in macb_interrupt()
1694 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1696 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1698 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1703 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1706 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1712 spin_unlock(&bp->lock); in macb_interrupt()
1723 struct macb *bp = netdev_priv(dev); in macb_poll_controller() local
1729 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1735 static unsigned int macb_tx_map(struct macb *bp, in macb_tx_map() argument
1767 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1770 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1773 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1787 size = min(len, bp->max_tx_length); in macb_tx_map()
1797 size = min(len, bp->max_tx_length); in macb_tx_map()
1798 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1801 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1803 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1821 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
1836 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
1858 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
1867 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
1874 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
1884 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
1897 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
1902 macb_tx_unmap(bp, tx_skb); in macb_tx_map()
2022 struct macb *bp = netdev_priv(dev); in macb_start_xmit() local
2023 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2050 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2055 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2058 netdev_vdbg(bp->dev, in macb_start_xmit()
2072 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2074 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2078 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2081 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
2085 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2087 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2088 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2094 if (!macb_tx_map(bp, queue, skb, hdrlen)) { in macb_start_xmit()
2103 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_start_xmit()
2105 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2109 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
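Before mapping anything, macb_start_xmit() estimates the descriptor budget: the linear head costs DIV_ROUND_UP(headlen, max_tx_length) descriptors (one extra when LSO carves the header into its own descriptor), and each page fragment costs DIV_ROUND_UP(frag_size, max_tx_length); the frame is only queued if CIRC_SPACE covers the total. The estimate by itself, with made-up lengths:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MAX_TX_LEN 2048u   /* stand-in for bp->max_tx_length */

    int main(void)
    {
        unsigned int headlen = 5000;            /* linear part of the skb */
        unsigned int frags[] = { 3000, 512 };   /* paged fragments */
        unsigned int desc_cnt = DIV_ROUND_UP(headlen, MAX_TX_LEN);

        for (unsigned int i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
            desc_cnt += DIV_ROUND_UP(frags[i], MAX_TX_LEN);

        printf("need %u descriptors\n", desc_cnt);   /* 3 + 2 + 1 = 6 */
        return 0;
    }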
2114 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) in macb_init_rx_buffer_size() argument
2116 if (!macb_is_gem(bp)) { in macb_init_rx_buffer_size()
2117 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2119 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2121 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2122 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2125 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2126 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2130 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2131 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
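On GEM the RX buffer size tracks the MTU but must stay a multiple of RX_BUFFER_MULTIPLE (64 bytes in this driver), so macb_init_rx_buffer_size() rounds up when the requested size is not aligned; plain MACB always uses the fixed MACB_RX_BUFFER_SIZE instead. The roundup by itself:

    #include <stdio.h>

    #define RX_BUFFER_MULTIPLE 64u
    /* Same effect as the kernel's roundup() for this use. */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        unsigned int sizes[] = { 128, 1522, 1536, 9000 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%4u -> %4u\n", sizes[i],
                   ROUNDUP(sizes[i], RX_BUFFER_MULTIPLE));
        return 0;
    }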
2134 static void gem_free_rx_buffers(struct macb *bp) in gem_free_rx_buffers() argument
2143 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2147 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2154 addr = macb_get_addr(bp, desc); in gem_free_rx_buffers()
2156 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2167 static void macb_free_rx_buffers(struct macb *bp) in macb_free_rx_buffers() argument
2169 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2172 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2173 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2179 static void macb_free_consistent(struct macb *bp) in macb_free_consistent() argument
2185 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2187 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2191 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2192 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2197 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2198 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2205 static int gem_alloc_rx_buffers(struct macb *bp) in gem_alloc_rx_buffers() argument
2211 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2212 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2217 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2219 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2224 static int macb_alloc_rx_buffers(struct macb *bp) in macb_alloc_rx_buffers() argument
2226 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2229 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2230 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2235 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2241 static int macb_alloc_consistent(struct macb *bp) in macb_alloc_consistent() argument
2247 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2248 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2249 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2254 netdev_dbg(bp->dev, in macb_alloc_consistent()
2259 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2264 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2265 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2269 netdev_dbg(bp->dev, in macb_alloc_consistent()
2273 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2279 macb_free_consistent(bp); in macb_alloc_consistent()
2283 static void gem_init_rings(struct macb *bp) in gem_init_rings() argument
2290 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2291 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2293 macb_set_addr(bp, desc, 0); in gem_init_rings()
2308 static void macb_init_rings(struct macb *bp) in macb_init_rings() argument
2313 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2315 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2316 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2317 macb_set_addr(bp, desc, 0); in macb_init_rings()
2320 bp->queues[0].tx_head = 0; in macb_init_rings()
2321 bp->queues[0].tx_tail = 0; in macb_init_rings()
2325 static void macb_reset_hw(struct macb *bp) in macb_reset_hw() argument
2329 u32 ctrl = macb_readl(bp, NCR); in macb_reset_hw()
2339 macb_writel(bp, NCR, ctrl); in macb_reset_hw()
2342 macb_writel(bp, TSR, -1); in macb_reset_hw()
2343 macb_writel(bp, RSR, -1); in macb_reset_hw()
2346 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2349 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2354 static u32 gem_mdc_clk_div(struct macb *bp) in gem_mdc_clk_div() argument
2357 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2375 static u32 macb_mdc_clk_div(struct macb *bp) in macb_mdc_clk_div() argument
2380 if (macb_is_gem(bp)) in macb_mdc_clk_div()
2381 return gem_mdc_clk_div(bp); in macb_mdc_clk_div()
2383 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
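Both clock-divider helpers walk a threshold ladder over the pclk rate and pick the smallest divisor that keeps MDC in spec (at or under roughly 2.5 MHz); GEM offers the wider ladder (8/16/32/48/64/96), plain MACB a shorter one. A sketch of the GEM-style ladder; treat the exact thresholds as a reading of this driver vintage rather than a datasheet quote:

    #include <stdio.h>

    static unsigned int gem_mdc_div(unsigned long pclk_hz)
    {
        if (pclk_hz <= 20000000)  return 8;
        if (pclk_hz <= 40000000)  return 16;
        if (pclk_hz <= 80000000)  return 32;
        if (pclk_hz <= 120000000) return 48;
        if (pclk_hz <= 160000000) return 64;
        return 96;                 /* fallback for fast pclk */
    }

    int main(void)
    {
        unsigned long rates[] = { 19200000, 75000000, 150000000, 250000000 };

        for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
            printf("pclk %9lu Hz -> MDC = pclk/%2u = %lu Hz\n",
                   rates[i], gem_mdc_div(rates[i]),
                   rates[i] / gem_mdc_div(rates[i]));
        return 0;
    }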
2400 static u32 macb_dbw(struct macb *bp) in macb_dbw() argument
2402 if (!macb_is_gem(bp)) in macb_dbw()
2405 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { in macb_dbw()
2423 static void macb_configure_dma(struct macb *bp) in macb_configure_dma() argument
2430 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2431 if (macb_is_gem(bp)) { in macb_configure_dma()
2432 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2433 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2439 if (bp->dma_burst_length) in macb_configure_dma()
2440 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2444 if (bp->native_io) in macb_configure_dma()
2449 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2456 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2460 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2463 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2465 gem_writel(bp, DMACFG, dmacfg); in macb_configure_dma()
2469 static void macb_init_hw(struct macb *bp) in macb_init_hw() argument
2473 macb_reset_hw(bp); in macb_init_hw()
2474 macb_set_hwaddr(bp); in macb_init_hw()
2476 config = macb_mdc_clk_div(bp); in macb_init_hw()
2479 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2483 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2485 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2487 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2489 config |= macb_dbw(bp); in macb_init_hw()
2490 macb_writel(bp, NCFGR, config); in macb_init_hw()
2491 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2492 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2493 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2494 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2495 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2497 macb_configure_dma(bp); in macb_init_hw()
2562 struct macb *bp = netdev_priv(dev); in macb_sethashtable() local
2572 macb_or_gem_writel(bp, HRB, mc_filter[0]); in macb_sethashtable()
2573 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
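macb_sethashtable() folds every multicast address into a 6-bit index (in the driver each index bit is the XOR of every sixth bit of the address), sets that bit in a 64-bit filter held as mc_filter[2], and writes the halves to HRB and HRT. A sketch of the split-filter bookkeeping; hash6() here is a simplified hypothetical stand-in, not the driver's hash_get_index():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 6-bit hash; the real one XORs bit columns. */
    static unsigned int hash6(const uint8_t *mac)
    {
        unsigned int h = 0;
        for (int i = 0; i < 6; i++)
            h ^= mac[i];
        return h & 0x3f;               /* 64 filter bits -> 6-bit index */
    }

    int main(void)
    {
        uint32_t mc_filter[2] = { 0, 0 };
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        unsigned int bit = hash6(mac);

        mc_filter[bit >> 5] |= 1u << (bit & 31);
        /* mc_filter[0] goes to HRB, mc_filter[1] to HRT. */
        printf("HRB = 0x%08x, HRT = 0x%08x\n", mc_filter[0], mc_filter[1]);
        return 0;
    }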
2580 struct macb *bp = netdev_priv(dev); in macb_set_rx_mode() local
2582 cfg = macb_readl(bp, NCFGR); in macb_set_rx_mode()
2589 if (macb_is_gem(bp)) in macb_set_rx_mode()
2596 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2602 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2603 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2611 macb_or_gem_writel(bp, HRB, 0); in macb_set_rx_mode()
2612 macb_or_gem_writel(bp, HRT, 0); in macb_set_rx_mode()
2616 macb_writel(bp, NCFGR, cfg); in macb_set_rx_mode()
2622 struct macb *bp = netdev_priv(dev); in macb_open() local
2627 netdev_dbg(bp->dev, "open\n"); in macb_open()
2629 err = pm_runtime_get_sync(&bp->pdev->dev); in macb_open()
2634 macb_init_rx_buffer_size(bp, bufsz); in macb_open()
2636 err = macb_alloc_consistent(bp); in macb_open()
2643 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2646 macb_init_hw(bp); in macb_open()
2648 err = macb_phylink_connect(bp); in macb_open()
2654 if (bp->ptp_info) in macb_open()
2655 bp->ptp_info->ptp_init(dev); in macb_open()
2660 macb_reset_hw(bp); in macb_open()
2661 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2663 macb_free_consistent(bp); in macb_open()
2665 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2671 struct macb *bp = netdev_priv(dev); in macb_close() local
2678 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_close()
2681 phylink_stop(bp->phylink); in macb_close()
2682 phylink_disconnect_phy(bp->phylink); in macb_close()
2684 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2685 macb_reset_hw(bp); in macb_close()
2687 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2689 macb_free_consistent(bp); in macb_close()
2691 if (bp->ptp_info) in macb_close()
2692 bp->ptp_info->ptp_remove(dev); in macb_close()
2694 pm_runtime_put(&bp->pdev->dev); in macb_close()
2709 static void gem_update_stats(struct macb *bp) in gem_update_stats() argument
2715 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2719 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2721 bp->ethtool_stats[i] += val; in gem_update_stats()
2726 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2727 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2733 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2735 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2738 static struct net_device_stats *gem_get_stats(struct macb *bp) in gem_get_stats() argument
2740 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2741 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2743 if (!netif_running(bp->dev)) in gem_get_stats()
2746 gem_update_stats(bp); in gem_get_stats()
2782 struct macb *bp; in gem_get_ethtool_stats() local
2784 bp = netdev_priv(dev); in gem_get_ethtool_stats()
2785 gem_update_stats(bp); in gem_get_ethtool_stats()
2786 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
2792 struct macb *bp = netdev_priv(dev); in gem_get_sset_count() local
2796 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
2805 struct macb *bp = netdev_priv(dev); in gem_get_ethtool_strings() local
2816 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
2829 struct macb *bp = netdev_priv(dev); in macb_get_stats() local
2830 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
2831 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
2833 if (macb_is_gem(bp)) in macb_get_stats()
2834 return gem_get_stats(bp); in macb_get_stats()
2837 macb_update_stats(bp); in macb_get_stats()
2882 struct macb *bp = netdev_priv(dev); in macb_get_regs() local
2886 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
2889 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
2890 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
2892 regs_buff[0] = macb_readl(bp, NCR); in macb_get_regs()
2893 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
2894 regs_buff[2] = macb_readl(bp, NSR); in macb_get_regs()
2895 regs_buff[3] = macb_readl(bp, TSR); in macb_get_regs()
2896 regs_buff[4] = macb_readl(bp, RBQP); in macb_get_regs()
2897 regs_buff[5] = macb_readl(bp, TBQP); in macb_get_regs()
2898 regs_buff[6] = macb_readl(bp, RSR); in macb_get_regs()
2899 regs_buff[7] = macb_readl(bp, IMR); in macb_get_regs()
2903 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
2904 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
2906 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
2907 regs_buff[12] = macb_or_gem_readl(bp, USRIO); in macb_get_regs()
2908 if (macb_is_gem(bp)) in macb_get_regs()
2909 regs_buff[13] = gem_readl(bp, DMACFG); in macb_get_regs()
2914 struct macb *bp = netdev_priv(netdev); in macb_get_wol() local
2916 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
2917 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
2920 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
2927 struct macb *bp = netdev_priv(netdev); in macb_set_wol() local
2931 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
2938 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
2943 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
2945 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
2947 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
2955 struct macb *bp = netdev_priv(netdev); in macb_get_link_ksettings() local
2957 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
2963 struct macb *bp = netdev_priv(netdev); in macb_set_link_ksettings() local
2965 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
2971 struct macb *bp = netdev_priv(netdev); in macb_get_ringparam() local
2976 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
2977 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
2983 struct macb *bp = netdev_priv(netdev); in macb_set_ringparam() local
2998 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
2999 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3004 if (netif_running(bp->dev)) { in macb_set_ringparam()
3006 macb_close(bp->dev); in macb_set_ringparam()
3009 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3010 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3013 macb_open(bp->dev); in macb_set_ringparam()
3019 static unsigned int gem_get_tsu_rate(struct macb *bp) in gem_get_tsu_rate() argument
3024 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3028 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3029 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3044 struct macb *bp = netdev_priv(dev); in gem_get_ts_info() local
3046 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3066 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3085 struct macb *bp = netdev_priv(netdev); in macb_get_ts_info() local
3087 if (bp->ptp_info) in macb_get_ts_info()
3088 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3093 static void gem_enable_flow_filters(struct macb *bp, bool enable) in gem_enable_flow_filters() argument
3095 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3103 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); in gem_enable_flow_filters()
3105 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3112 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3135 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3139 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) in gem_prog_cmp_regs() argument
3148 if (!macb_is_gem(bp)) in gem_prog_cmp_regs()
3163 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); in gem_prog_cmp_regs()
3164 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); in gem_prog_cmp_regs()
3177 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); in gem_prog_cmp_regs()
3178 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); in gem_prog_cmp_regs()
3205 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); in gem_prog_cmp_regs()
3206 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); in gem_prog_cmp_regs()
3219 gem_writel_n(bp, SCRT2, index, t2_scr); in gem_prog_cmp_regs()
3225 struct macb *bp = netdev_priv(netdev); in gem_add_flow_filter() local
3244 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3247 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3260 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3262 gem_prog_cmp_regs(bp, fs); in gem_add_flow_filter()
3263 bp->rx_fs_list.count++; in gem_add_flow_filter()
3265 gem_enable_flow_filters(bp, 1); in gem_add_flow_filter()
3267 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3271 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3279 struct macb *bp = netdev_priv(netdev); in gem_del_flow_filter() local
3284 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3286 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3298 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3301 bp->rx_fs_list.count--; in gem_del_flow_filter()
3302 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3308 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3315 struct macb *bp = netdev_priv(netdev); in gem_get_flow_entry() local
3318 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3330 struct macb *bp = netdev_priv(netdev); in gem_get_all_flow_entries() local
3334 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3340 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3349 struct macb *bp = netdev_priv(netdev); in gem_get_rxnfc() local
3354 cmd->data = bp->num_queues; in gem_get_rxnfc()
3357 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3376 struct macb *bp = netdev_priv(netdev); in gem_set_rxnfc() local
3381 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3382 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3433 struct macb *bp = netdev_priv(dev); in macb_ioctl() local
3438 if (bp->ptp_info) { in macb_ioctl()
3441 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3443 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3447 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3450 static inline void macb_set_txcsum_feature(struct macb *bp, in macb_set_txcsum_feature() argument
3455 if (!macb_is_gem(bp)) in macb_set_txcsum_feature()
3458 val = gem_readl(bp, DMACFG); in macb_set_txcsum_feature()
3464 gem_writel(bp, DMACFG, val); in macb_set_txcsum_feature()
3467 static inline void macb_set_rxcsum_feature(struct macb *bp, in macb_set_rxcsum_feature() argument
3470 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3473 if (!macb_is_gem(bp)) in macb_set_rxcsum_feature()
3476 val = gem_readl(bp, NCFGR); in macb_set_rxcsum_feature()
3482 gem_writel(bp, NCFGR, val); in macb_set_rxcsum_feature()
3485 static inline void macb_set_rxflow_feature(struct macb *bp, in macb_set_rxflow_feature() argument
3488 if (!macb_is_gem(bp)) in macb_set_rxflow_feature()
3491 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); in macb_set_rxflow_feature()
3497 struct macb *bp = netdev_priv(netdev); in macb_set_features() local
3502 macb_set_txcsum_feature(bp, features); in macb_set_features()
3506 macb_set_rxcsum_feature(bp, features); in macb_set_features()
3510 macb_set_rxflow_feature(bp, features); in macb_set_features()
3515 static void macb_restore_features(struct macb *bp) in macb_restore_features() argument
3517 struct net_device *netdev = bp->dev; in macb_restore_features()
3522 macb_set_txcsum_feature(bp, features); in macb_restore_features()
3525 macb_set_rxcsum_feature(bp, features); in macb_restore_features()
3528 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3529 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3531 macb_set_rxflow_feature(bp, features); in macb_restore_features()
3554 static void macb_configure_caps(struct macb *bp, in macb_configure_caps() argument
3560 bp->caps = dt_conf->caps; in macb_configure_caps()
3562 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3563 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3565 dcfg = gem_readl(bp, DCFG1); in macb_configure_caps()
3567 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3568 dcfg = gem_readl(bp, DCFG2); in macb_configure_caps()
3570 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3572 if (gem_has_ptp(bp)) { in macb_configure_caps()
3573 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) in macb_configure_caps()
3574 dev_err(&bp->pdev->dev, in macb_configure_caps()
3577 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3578 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3584 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3706 struct macb *bp = netdev_priv(dev); in macb_init() local
3711 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3712 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3719 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3722 queue = &bp->queues[q]; in macb_init()
3723 queue->bp = bp; in macb_init()
3734 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3748 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3777 if (macb_is_gem(bp)) { in macb_init()
3778 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
3779 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
3780 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
3781 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
3782 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
3785 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
3786 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
3787 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
3788 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
3789 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
3797 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) in macb_init()
3801 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
3803 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
3811 reg = gem_readl(bp, DCFG8); in macb_init()
3812 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
3814 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
3815 if (bp->max_tuples > 0) { in macb_init()
3821 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); in macb_init()
3825 bp->rx_fs_list.count = 0; in macb_init()
3826 spin_lock_init(&bp->rx_fs_lock); in macb_init()
3828 bp->max_tuples = 0; in macb_init()
3831 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
3833 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
3835 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
3836 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3838 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
3841 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
3844 macb_or_gem_writel(bp, USRIO, val); in macb_init()
3848 val = macb_mdc_clk_div(bp); in macb_init()
3849 val |= macb_dbw(bp); in macb_init()
3850 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
3852 macb_writel(bp, NCFGR, val); in macb_init()
4245 struct macb *bp = netdev_priv(dev); in at91ether_init() local
4248 bp->queues[0].bp = bp; in at91ether_init()
4258 macb_writel(bp, NCR, 0); in at91ether_init()
4260 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); in at91ether_init()
4491 struct macb *bp; in macb_probe() local
4522 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); in macb_probe()
4532 bp = netdev_priv(dev); in macb_probe()
4533 bp->pdev = pdev; in macb_probe()
4534 bp->dev = dev; in macb_probe()
4535 bp->regs = mem; in macb_probe()
4536 bp->native_io = native_io; in macb_probe()
4538 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4539 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4541 bp->macb_reg_readl = hw_readl; in macb_probe()
4542 bp->macb_reg_writel = hw_writel; in macb_probe()
4544 bp->num_queues = num_queues; in macb_probe()
4545 bp->queue_mask = queue_mask; in macb_probe()
4547 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4548 bp->pclk = pclk; in macb_probe()
4549 bp->hclk = hclk; in macb_probe()
4550 bp->tx_clk = tx_clk; in macb_probe()
4551 bp->rx_clk = rx_clk; in macb_probe()
4552 bp->tsu_clk = tsu_clk; in macb_probe()
4554 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4556 bp->wol = 0; in macb_probe()
4558 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4559 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4561 spin_lock_init(&bp->lock); in macb_probe()
4564 macb_configure_caps(bp, macb_config); in macb_probe()
4567 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { in macb_probe()
4569 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4582 if (bp->caps & MACB_CAPS_JUMBO) in macb_probe()
4583 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4587 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4588 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4590 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4591 macb_dma_desc_get_size(bp); in macb_probe()
4593 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4595 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4596 macb_dma_desc_get_size(bp); in macb_probe()
4599 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4600 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4601 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
4608 ether_addr_copy(bp->dev->dev_addr, mac); in macb_probe()
4610 macb_get_hwaddr(bp); in macb_probe()
4616 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4618 bp->phy_interface = interface; in macb_probe()
4625 err = macb_mii_init(bp); in macb_probe()
4637 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
4640 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), in macb_probe()
4643 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
4644 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
4649 mdiobus_unregister(bp->mii_bus); in macb_probe()
4650 mdiobus_free(bp->mii_bus); in macb_probe()
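One detail from macb_probe() worth unpacking: at lines 4588-4596 the ring allocations are padded for descriptor prefetch. DCFG10 reports a field where a nonzero value val means the controller may prefetch 2^val descriptors past the ring end, and the pad is computed as (2 << (val - 1)) times the descriptor size. The arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int desc_size = 16;   /* stand-in for macb_dma_desc_get_size() */

        for (unsigned int val = 1; val <= 4; val++)
            printf("val=%u -> prefetch %2u descriptors = %3u pad bytes\n",
                   val, 2u << (val - 1), (2u << (val - 1)) * desc_size);
        return 0;
    }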
4671 struct macb *bp; in macb_remove() local
4676 bp = netdev_priv(dev); in macb_remove()
4677 mdiobus_unregister(bp->mii_bus); in macb_remove()
4678 mdiobus_free(bp->mii_bus); in macb_remove()
4681 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
4685 clk_disable_unprepare(bp->tx_clk); in macb_remove()
4686 clk_disable_unprepare(bp->hclk); in macb_remove()
4687 clk_disable_unprepare(bp->pclk); in macb_remove()
4688 clk_disable_unprepare(bp->rx_clk); in macb_remove()
4689 clk_disable_unprepare(bp->tsu_clk); in macb_remove()
4692 phylink_destroy(bp->phylink); in macb_remove()
4702 struct macb *bp = netdev_priv(netdev); in macb_suspend() local
4703 struct macb_queue *queue = bp->queues; in macb_suspend()
4711 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
4712 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4714 macb_writel(bp, TSR, -1); in macb_suspend()
4715 macb_writel(bp, RSR, -1); in macb_suspend()
4716 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4721 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
4727 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
4728 if (macb_is_gem(bp)) { in macb_suspend()
4729 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
4730 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4734 bp->queues[0].irq, err); in macb_suspend()
4735 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4738 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
4739 gem_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4741 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
4742 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4746 bp->queues[0].irq, err); in macb_suspend()
4747 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4750 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
4751 macb_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4753 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4755 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
4759 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4763 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
4765 phylink_stop(bp->phylink); in macb_suspend()
4767 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4768 macb_reset_hw(bp); in macb_suspend()
4769 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4772 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
4773 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
4776 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
4778 if (bp->ptp_info) in macb_suspend()
4779 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
4789 struct macb *bp = netdev_priv(netdev); in macb_resume() local
4790 struct macb_queue *queue = bp->queues; in macb_resume()
4801 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
4802 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
4804 if (macb_is_gem(bp)) { in macb_resume()
4805 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
4806 gem_writel(bp, WOL, 0); in macb_resume()
4808 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
4809 macb_writel(bp, WOL, 0); in macb_resume()
4812 queue_readl(bp->queues, ISR); in macb_resume()
4813 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
4814 queue_writel(bp->queues, ISR, -1); in macb_resume()
4816 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
4817 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
4818 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
4822 bp->queues[0].irq, err); in macb_resume()
4823 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4826 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
4828 disable_irq_wake(bp->queues[0].irq); in macb_resume()
4834 phylink_stop(bp->phylink); in macb_resume()
4838 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
4843 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
4845 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
4846 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
4848 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_resume()
4849 macb_init_hw(bp); in macb_resume()
4851 macb_restore_features(bp); in macb_resume()
4853 phylink_start(bp->phylink); in macb_resume()
4857 if (bp->ptp_info) in macb_resume()
4858 bp->ptp_info->ptp_init(netdev); in macb_resume()
4866 struct macb *bp = netdev_priv(netdev); in macb_runtime_suspend() local
4869 clk_disable_unprepare(bp->tx_clk); in macb_runtime_suspend()
4870 clk_disable_unprepare(bp->hclk); in macb_runtime_suspend()
4871 clk_disable_unprepare(bp->pclk); in macb_runtime_suspend()
4872 clk_disable_unprepare(bp->rx_clk); in macb_runtime_suspend()
4874 clk_disable_unprepare(bp->tsu_clk); in macb_runtime_suspend()
4882 struct macb *bp = netdev_priv(netdev); in macb_runtime_resume() local
4885 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
4886 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
4887 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
4888 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
4890 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
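The runtime-PM hooks gate the five clocks in matched pairs: clk_prepare_enable() on resume, clk_disable_unprepare() on suspend. In this vintage of the driver the four bulk clocks (pclk, hclk, tx_clk, rx_clk) appear to be gated only when the device is not armed for wake-up, while tsu_clk is handled past that conditional (the gaps at lines 4873 and 4889 are the closing braces). A rough sketch of that shape, with the clock ops stubbed to prints:

    #include <stdbool.h>
    #include <stdio.h>

    static void clk_on(const char *name)  { printf("enable  %s\n", name); }
    static void clk_off(const char *name) { printf("disable %s\n", name); }

    static void runtime_resume(bool may_wakeup)
    {
        if (!may_wakeup) {        /* bulk clocks stayed on while WoL-armed */
            clk_on("pclk");
            clk_on("hclk");
            clk_on("tx_clk");
            clk_on("rx_clk");
        }
        clk_on("tsu_clk");
    }

    static void runtime_suspend(bool may_wakeup)
    {
        if (!may_wakeup) {
            clk_off("tx_clk");
            clk_off("hclk");
            clk_off("pclk");
            clk_off("rx_clk");
        }
        clk_off("tsu_clk");
    }

    int main(void)
    {
        runtime_resume(false);
        runtime_suspend(false);
        return 0;
    }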