Lines matching refs: fep. Each entry gives the source line number, the matching code, and the enclosing function; "local" or "argument" marks the line that declares fep, the driver's struct fec_enet_private pointer.
333 struct fec_enet_private *fep = netdev_priv(ndev); in fec_dump() local
341 txq = fep->tx_queue[0]; in fec_dump()
385 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_frag_skb() local
411 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
414 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_frag_skb()
419 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
420 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_frag_skb()
431 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_frag_skb()
432 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_frag_skb()
436 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_frag_skb()
440 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, in fec_enet_txq_submit_frag_skb()
442 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_frag_skb()
462 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), in fec_enet_txq_submit_frag_skb()
471 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_skb() local
507 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_skb()
508 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_skb()
512 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_skb()
517 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); in fec_enet_txq_submit_skb()
518 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_skb()
528 dma_unmap_single(&fep->pdev->dev, addr, in fec_enet_txq_submit_skb()
535 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
538 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
545 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
550 fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
553 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_skb()
601 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_data_tso() local
612 if (((unsigned long) data) & fep->tx_align || in fec_enet_txq_put_data_tso()
613 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_data_tso()
617 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_data_tso()
621 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); in fec_enet_txq_put_data_tso()
622 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_put_data_tso()
632 if (fep->bufdesc_ex) { in fec_enet_txq_put_data_tso()
633 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_data_tso()
646 if (fep->bufdesc_ex) in fec_enet_txq_put_data_tso()
660 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_hdr_tso() local
674 if (((unsigned long)bufaddr) & fep->tx_align || in fec_enet_txq_put_hdr_tso()
675 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_hdr_tso()
679 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_hdr_tso()
682 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, in fec_enet_txq_put_hdr_tso()
684 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { in fec_enet_txq_put_hdr_tso()
695 if (fep->bufdesc_ex) { in fec_enet_txq_put_hdr_tso()
696 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_hdr_tso()
713 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_tso() local
779 if (!(fep->quirks & FEC_QUIRK_ERR007885) || in fec_enet_txq_submit_tso()
796 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_start_xmit() local
804 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
825 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_bd_init() local
832 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_bd_init()
834 rxq = fep->rx_queue[q]; in fec_enet_bd_init()
854 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_bd_init()
856 txq = fep->tx_queue[q]; in fec_enet_bd_init()
865 dma_unmap_single(&fep->pdev->dev, in fec_enet_bd_init()
886 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_active_rxring() local
889 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_active_rxring()
890 writel(0, fep->rx_queue[i]->bd.reg_desc_active); in fec_enet_active_rxring()
895 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_enable_ring() local
900 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_enable_ring()
901 rxq = fep->rx_queue[i]; in fec_enet_enable_ring()
902 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); in fec_enet_enable_ring()
903 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); in fec_enet_enable_ring()
908 fep->hwp + FEC_RCMR(i)); in fec_enet_enable_ring()
911 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_enable_ring()
912 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
913 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
918 fep->hwp + FEC_DMA_CFG(i)); in fec_enet_enable_ring()
924 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_reset_skb() local
928 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_reset_skb()
929 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
948 struct fec_enet_private *fep = netdev_priv(ndev); in fec_restart() local
958 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_restart()
959 writel(0, fep->hwp + FEC_ECNTRL); in fec_restart()
961 writel(1, fep->hwp + FEC_ECNTRL); in fec_restart()
971 fep->hwp + FEC_ADDR_LOW); in fec_restart()
973 fep->hwp + FEC_ADDR_HIGH); in fec_restart()
976 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); in fec_restart()
986 if (fep->full_duplex == DUPLEX_FULL) { in fec_restart()
988 writel(0x04, fep->hwp + FEC_X_CNTRL); in fec_restart()
992 writel(0x0, fep->hwp + FEC_X_CNTRL); in fec_restart()
996 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_restart()
999 if (fep->quirks & FEC_QUIRK_HAS_RACC) { in fec_restart()
1000 val = readl(fep->hwp + FEC_RACC); in fec_restart()
1003 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) in fec_restart()
1008 writel(val, fep->hwp + FEC_RACC); in fec_restart()
1009 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); in fec_restart()
1017 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1022 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || in fec_restart()
1023 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in fec_restart()
1024 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || in fec_restart()
1025 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) in fec_restart()
1027 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1043 if (fep->quirks & FEC_QUIRK_USE_GASKET) { in fec_restart()
1046 writel(0, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1047 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) in fec_restart()
1055 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1059 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); in fec_restart()
1062 writel(2, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1069 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || in fec_restart()
1070 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && in fec_restart()
1075 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); in fec_restart()
1076 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); in fec_restart()
1077 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); in fec_restart()
1078 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); in fec_restart()
1081 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); in fec_restart()
1087 writel(rcntl, fep->hwp + FEC_R_CNTRL); in fec_restart()
1092 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); in fec_restart()
1093 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); in fec_restart()
1096 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1100 writel(1 << 8, fep->hwp + FEC_X_WMRK); in fec_restart()
1103 if (fep->bufdesc_ex) in fec_restart()
1108 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); in fec_restart()
1112 writel(ecntl, fep->hwp + FEC_ECNTRL); in fec_restart()
1115 if (fep->bufdesc_ex) in fec_restart()
1119 if (fep->link) in fec_restart()
1120 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_restart()
1122 writel(0, fep->hwp + FEC_IMASK); in fec_restart()
1129 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) in fec_enet_stop_mode() argument
1131 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; in fec_enet_stop_mode()
1132 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; in fec_enet_stop_mode()
1150 struct fec_enet_private *fep = netdev_priv(ndev); in fec_stop() local
1151 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); in fec_stop()
1155 if (fep->link) { in fec_stop()
1156 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ in fec_stop()
1158 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) in fec_stop()
1166 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1167 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_stop()
1168 writel(0, fep->hwp + FEC_ECNTRL); in fec_stop()
1170 writel(1, fep->hwp + FEC_ECNTRL); in fec_stop()
1173 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_stop()
1175 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); in fec_stop()
1176 val = readl(fep->hwp + FEC_ECNTRL); in fec_stop()
1178 writel(val, fep->hwp + FEC_ECNTRL); in fec_stop()
1179 fec_enet_stop_mode(fep, true); in fec_stop()
1181 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_stop()
1184 if (fep->quirks & FEC_QUIRK_ENET_MAC && in fec_stop()
1185 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1186 writel(2, fep->hwp + FEC_ECNTRL); in fec_stop()
1187 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); in fec_stop()
1195 struct fec_enet_private *fep = netdev_priv(ndev); in fec_timeout() local
1201 schedule_work(&fep->tx_timeout_work); in fec_timeout()
1206 struct fec_enet_private *fep = in fec_enet_timeout_work() local
1208 struct net_device *ndev = fep->netdev; in fec_enet_timeout_work()
1212 napi_disable(&fep->napi); in fec_enet_timeout_work()
1217 napi_enable(&fep->napi); in fec_enet_timeout_work()
1223 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, in fec_enet_hwtstamp() argument
1229 spin_lock_irqsave(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1230 ns = timecounter_cyc2time(&fep->tc, ts); in fec_enet_hwtstamp()
1231 spin_unlock_irqrestore(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1240 struct fec_enet_private *fep; in fec_enet_tx_queue() local
1249 fep = netdev_priv(ndev); in fec_enet_tx_queue()
1251 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1271 dma_unmap_single(&fep->pdev->dev, in fec_enet_tx_queue()
1304 fep->hwts_tx_en) && in fec_enet_tx_queue()
1305 fep->bufdesc_ex) { in fec_enet_tx_queue()
1309 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); in fec_enet_tx_queue()
1348 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_tx() local
1352 for (i = fep->num_tx_queues - 1; i >= 0; i--) in fec_enet_tx()
1359 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_new_rxbdp() local
1362 off = ((unsigned long)skb->data) & fep->rx_align; in fec_enet_new_rxbdp()
1364 skb_reserve(skb, fep->rx_align + 1 - off); in fec_enet_new_rxbdp()
1366 …bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fe… in fec_enet_new_rxbdp()
1367 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { in fec_enet_new_rxbdp()
1379 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_copybreak() local
1382 if (length > fep->rx_copybreak) in fec_enet_copybreak()
1389 dma_sync_single_for_cpu(&fep->pdev->dev, in fec_enet_copybreak()
1391 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_copybreak()
1410 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_queue() local
1424 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; in fec_enet_rx_queue()
1429 rxq = fep->rx_queue[queue_id]; in fec_enet_rx_queue()
1442 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); in fec_enet_rx_queue()
1490 dma_unmap_single(&fep->pdev->dev, in fec_enet_rx_queue()
1492 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1504 if (fep->quirks & FEC_QUIRK_HAS_RACC) in fec_enet_rx_queue()
1510 if (fep->bufdesc_ex) in fec_enet_rx_queue()
1516 fep->bufdesc_ex && in fec_enet_rx_queue()
1532 if (fep->hwts_rx_en && fep->bufdesc_ex) in fec_enet_rx_queue()
1533 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), in fec_enet_rx_queue()
1536 if (fep->bufdesc_ex && in fec_enet_rx_queue()
1537 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { in fec_enet_rx_queue()
1553 napi_gro_receive(&fep->napi, skb); in fec_enet_rx_queue()
1556 dma_sync_single_for_device(&fep->pdev->dev, in fec_enet_rx_queue()
1558 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1572 if (fep->bufdesc_ex) { in fec_enet_rx_queue()
1600 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx() local
1604 for (i = fep->num_rx_queues - 1; i >= 0; i--) in fec_enet_rx()
1610 static bool fec_enet_collect_events(struct fec_enet_private *fep) in fec_enet_collect_events() argument
1614 int_events = readl(fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1619 writel(int_events, fep->hwp + FEC_IEVENT); in fec_enet_collect_events()
1628 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_interrupt() local
1631 if (fec_enet_collect_events(fep) && fep->link) { in fec_enet_interrupt()
1634 if (napi_schedule_prep(&fep->napi)) { in fec_enet_interrupt()
1636 writel(0, fep->hwp + FEC_IMASK); in fec_enet_interrupt()
1637 __napi_schedule(&fep->napi); in fec_enet_interrupt()
1647 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_napi() local
1653 } while ((done < budget) && fec_enet_collect_events(fep)); in fec_enet_rx_napi()
1657 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_enet_rx_napi()
1666 struct fec_enet_private *fep = netdev_priv(ndev); in fec_get_mac() local
1667 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); in fec_get_mac()
1682 struct device_node *np = fep->pdev->dev.of_node; in fec_get_mac()
1708 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); in fec_get_mac()
1710 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); in fec_get_mac()
1719 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); in fec_get_mac()
1721 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", in fec_get_mac()
1730 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; in fec_get_mac()
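For reference, a minimal standalone sketch of the register layout behind the FEC_ADDR_LOW/FEC_ADDR_HIGH accesses above (lines 1708-1710, and 3201-3203 further down): the first four MAC bytes are assumed to sit in ADDR_LOW most-significant-byte-first and the last two in the top half of ADDR_HIGH, which is what the cpu_to_be32()/readl() pairing implies.

/* Illustrative only: pack/unpack a MAC address the way fec_get_mac() and
 * fec_set_mac_address() appear to use FEC_ADDR_LOW / FEC_ADDR_HIGH
 * (byte order assumed from the listing, not copied from the driver).
 */
#include <stdint.h>
#include <stdio.h>

static void mac_to_regs(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
	      (uint32_t)mac[2] << 8  | mac[3];
	*hi = (uint32_t)mac[4] << 24 | (uint32_t)mac[5] << 16;
}

static void regs_to_mac(uint32_t lo, uint32_t hi, uint8_t mac[6])
{
	mac[0] = lo >> 24; mac[1] = lo >> 16; mac[2] = lo >> 8; mac[3] = lo;
	mac[4] = hi >> 24; mac[5] = hi >> 16;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	uint8_t back[6];
	uint32_t lo, hi;

	mac_to_regs(mac, &lo, &hi);
	regs_to_mac(lo, hi, back);
	printf("ADDR_LOW=%08x ADDR_HIGH=%08x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       (unsigned)lo, (unsigned)hi,
	       back[0], back[1], back[2], back[3], back[4], back[5]);
	return 0;
}

Reading the two registers back, as fec_get_mac() does, recovers whatever address was last programmed into them.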
1740 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_adjust_link() local
1750 fep->link = 0; in fec_enet_adjust_link()
1752 if (!fep->link) { in fec_enet_adjust_link()
1753 fep->link = phy_dev->link; in fec_enet_adjust_link()
1757 if (fep->full_duplex != phy_dev->duplex) { in fec_enet_adjust_link()
1758 fep->full_duplex = phy_dev->duplex; in fec_enet_adjust_link()
1762 if (phy_dev->speed != fep->speed) { in fec_enet_adjust_link()
1763 fep->speed = phy_dev->speed; in fec_enet_adjust_link()
1769 napi_disable(&fep->napi); in fec_enet_adjust_link()
1774 napi_enable(&fep->napi); in fec_enet_adjust_link()
1777 if (fep->link) { in fec_enet_adjust_link()
1778 napi_disable(&fep->napi); in fec_enet_adjust_link()
1782 napi_enable(&fep->napi); in fec_enet_adjust_link()
1783 fep->link = phy_dev->link; in fec_enet_adjust_link()
1792 static int fec_enet_mdio_wait(struct fec_enet_private *fep) in fec_enet_mdio_wait() argument
1797 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, in fec_enet_mdio_wait()
1801 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mdio_wait()
1808 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_read() local
1809 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_read()
1825 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read()
1828 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_read()
1830 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_read()
1846 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read()
1849 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_read()
1851 netdev_err(fep->netdev, "MDIO read timeout\n"); in fec_enet_mdio_read()
1855 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); in fec_enet_mdio_read()
1867 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_write() local
1868 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_write()
1884 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write()
1887 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_write()
1889 netdev_err(fep->netdev, "MDIO address write timeout\n"); in fec_enet_mdio_write()
1902 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write()
1905 ret = fec_enet_mdio_wait(fep); in fec_enet_mdio_write()
1907 netdev_err(fep->netdev, "MDIO write timeout\n"); in fec_enet_mdio_write()
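The fec_enet_mdio_read()/fec_enet_mdio_write() hits above compose a Clause 22 management frame in FEC_MII_DATA and then wait on the MII event via fec_enet_mdio_wait() (line 1792). A self-contained sketch of that frame, with the bit positions assumed to follow the standard MMFR layout (ST, OP, PHY address, register address, turnaround, data) rather than copied from the driver's FEC_MMFR_* macros:

/* Illustrative sketch of an IEEE 802.3 Clause 22 MDIO frame as packed
 * into a 32-bit management register. Assumed field positions:
 * ST[31:30]=01, OP[29:28], PHYAD[27:23], REGAD[22:18], TA[17:16]=10,
 * DATA[15:0].
 */
#include <stdint.h>
#include <stdio.h>

#define MMFR_ST        (1u << 30)	/* start-of-frame: 01 */
#define MMFR_OP_READ   (2u << 28)	/* Clause 22 read opcode */
#define MMFR_OP_WRITE  (1u << 28)	/* Clause 22 write opcode */
#define MMFR_PA(p)     (((p) & 0x1fu) << 23)
#define MMFR_RA(r)     (((r) & 0x1fu) << 18)
#define MMFR_TA        (2u << 16)	/* turnaround: 10 */
#define MMFR_DATA(d)   ((d) & 0xffffu)

int main(void)
{
	/* read register 1 (BMSR) of the PHY at address 2 */
	uint32_t rd = MMFR_ST | MMFR_OP_READ | MMFR_PA(2) | MMFR_RA(1) | MMFR_TA;
	/* write 0x8000 (reset) to register 0 (BMCR) of the same PHY */
	uint32_t wr = MMFR_ST | MMFR_OP_WRITE | MMFR_PA(2) | MMFR_RA(0) |
		      MMFR_TA | MMFR_DATA(0x8000);

	printf("read frame  = 0x%08x\nwrite frame = 0x%08x\n",
	       (unsigned)rd, (unsigned)wr);
	return 0;
}

After a completed read, FEC_MMFR_DATA() at line 1855 extracts the low 16 data bits from the same register.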
1918 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_phy_reset_after_clk_enable() local
1923 } else if (fep->phy_node) { in fec_enet_phy_reset_after_clk_enable()
1931 phy_dev = of_phy_find_device(fep->phy_node); in fec_enet_phy_reset_after_clk_enable()
1939 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_clk_enable() local
1943 ret = clk_prepare_enable(fep->clk_enet_out); in fec_enet_clk_enable()
1947 if (fep->clk_ptp) { in fec_enet_clk_enable()
1948 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1949 ret = clk_prepare_enable(fep->clk_ptp); in fec_enet_clk_enable()
1951 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1954 fep->ptp_clk_on = true; in fec_enet_clk_enable()
1956 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1959 ret = clk_prepare_enable(fep->clk_ref); in fec_enet_clk_enable()
1965 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1966 if (fep->clk_ptp) { in fec_enet_clk_enable()
1967 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1968 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
1969 fep->ptp_clk_on = false; in fec_enet_clk_enable()
1970 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1972 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
1978 if (fep->clk_ptp) { in fec_enet_clk_enable()
1979 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1980 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
1981 fep->ptp_clk_on = false; in fec_enet_clk_enable()
1982 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1985 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1992 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_probe() local
1997 int dev_id = fep->dev_id; in fec_enet_mii_probe()
1999 if (fep->phy_node) { in fec_enet_mii_probe()
2000 phy_dev = of_phy_connect(ndev, fep->phy_node, in fec_enet_mii_probe()
2002 fep->phy_interface); in fec_enet_mii_probe()
2010 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) in fec_enet_mii_probe()
2014 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); in fec_enet_mii_probe()
2027 fep->phy_interface); in fec_enet_mii_probe()
2036 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { in fec_enet_mii_probe()
2047 fep->link = 0; in fec_enet_mii_probe()
2048 fep->full_duplex = 0; in fec_enet_mii_probe()
2059 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_init() local
2082 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { in fec_enet_mii_init()
2085 fep->mii_bus = fec0_mii_bus; in fec_enet_mii_init()
2108 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); in fec_enet_mii_init()
2109 if (fep->quirks & FEC_QUIRK_ENET_MAC) in fec_enet_mii_init()
2114 clk_get_rate(fep->clk_ipg)); in fec_enet_mii_init()
2131 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; in fec_enet_mii_init()
2133 fep->phy_speed = mii_speed << 1 | holdtime << 8; in fec_enet_mii_init()
2136 fep->phy_speed |= BIT(7); in fec_enet_mii_init()
2138 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { in fec_enet_mii_init()
2147 writel(0, fep->hwp + FEC_MII_DATA); in fec_enet_mii_init()
2150 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_enet_mii_init()
2153 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); in fec_enet_mii_init()
2155 fep->mii_bus = mdiobus_alloc(); in fec_enet_mii_init()
2156 if (fep->mii_bus == NULL) { in fec_enet_mii_init()
2161 fep->mii_bus->name = "fec_enet_mii_bus"; in fec_enet_mii_init()
2162 fep->mii_bus->read = fec_enet_mdio_read; in fec_enet_mii_init()
2163 fep->mii_bus->write = fec_enet_mdio_write; in fec_enet_mii_init()
2164 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in fec_enet_mii_init()
2165 pdev->name, fep->dev_id + 1); in fec_enet_mii_init()
2166 fep->mii_bus->priv = fep; in fec_enet_mii_init()
2167 fep->mii_bus->parent = &pdev->dev; in fec_enet_mii_init()
2169 err = of_mdiobus_register(fep->mii_bus, node); in fec_enet_mii_init()
2177 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) in fec_enet_mii_init()
2178 fec0_mii_bus = fep->mii_bus; in fec_enet_mii_init()
2183 mdiobus_free(fep->mii_bus); in fec_enet_mii_init()
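The MII clocking lines above (2108, 2131, 2133) derive the MSCR value from the ipg clock. A worked example of that arithmetic, assuming a 66 MHz ipg clock and a 2.5 MHz MDC target, and ignoring the FEC_QUIRK_ENET_MAC special case at line 2109:

/* Illustrative only: same integer arithmetic as lines 2108/2131/2133,
 * with assumed clock numbers.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_ipg  = 66000000UL;	/* assumed ipg clock */
	unsigned long bus_freq = 2500000UL;	/* assumed MDC target */

	unsigned int mii_speed = DIV_ROUND_UP(clk_ipg, bus_freq * 2);
	unsigned int holdtime  = DIV_ROUND_UP(clk_ipg, 100000000UL) - 1;
	unsigned int phy_speed = mii_speed << 1 | holdtime << 8;

	printf("mii_speed=%u holdtime=%u MSCR=0x%x\n",
	       mii_speed, holdtime, phy_speed);
	return 0;
}

With these numbers mii_speed is 14, holdtime is 0, and MSCR comes out as 0x1c, which line 2150 then writes to FEC_MII_SPEED.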
2189 static void fec_enet_mii_remove(struct fec_enet_private *fep) in fec_enet_mii_remove() argument
2192 mdiobus_unregister(fep->mii_bus); in fec_enet_mii_remove()
2193 mdiobus_free(fep->mii_bus); in fec_enet_mii_remove()
2200 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_drvinfo() local
2202 strlcpy(info->driver, fep->pdev->dev.driver->name, in fec_enet_get_drvinfo()
2209 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs_len() local
2213 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); in fec_enet_get_regs_len()
2297 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs() local
2298 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; in fec_enet_get_regs()
2299 struct device *dev = &fep->pdev->dev; in fec_enet_get_regs()
2333 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) in fec_enet_get_regs()
2347 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_ts_info() local
2349 if (fep->bufdesc_ex) { in fec_enet_get_ts_info()
2357 if (fep->ptp_clock) in fec_enet_get_ts_info()
2358 info->phc_index = ptp_clock_index(fep->ptp_clock); in fec_enet_get_ts_info()
2378 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_pauseparam() local
2380 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; in fec_enet_get_pauseparam()
2381 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; in fec_enet_get_pauseparam()
2388 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_pauseparam() local
2399 fep->pause_flag = 0; in fec_enet_set_pauseparam()
2402 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; in fec_enet_set_pauseparam()
2403 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; in fec_enet_set_pauseparam()
2414 napi_disable(&fep->napi); in fec_enet_set_pauseparam()
2419 napi_enable(&fep->napi); in fec_enet_set_pauseparam()
2495 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_update_ethtool_stats() local
2499 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); in fec_enet_update_ethtool_stats()
2505 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_get_ethtool_stats() local
2510 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); in fec_enet_get_ethtool_stats()
2538 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_clear_ethtool_stats() local
2542 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2545 writel(0, fep->hwp + fec_stats[i].offset); in fec_enet_clear_ethtool_stats()
2548 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); in fec_enet_clear_ethtool_stats()
2568 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_us_to_itr_clock() local
2570 return us * (fep->itr_clk_rate / 64000) / 1000; in fec_enet_us_to_itr_clock()
2576 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_itr_coal_set() local
2580 if (!fep->rx_time_itr || !fep->rx_pkts_itr || in fec_enet_itr_coal_set()
2581 !fep->tx_time_itr || !fep->tx_pkts_itr) in fec_enet_itr_coal_set()
2591 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); in fec_enet_itr_coal_set()
2592 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); in fec_enet_itr_coal_set()
2593 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); in fec_enet_itr_coal_set()
2594 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); in fec_enet_itr_coal_set()
2599 writel(tx_itr, fep->hwp + FEC_TXIC0); in fec_enet_itr_coal_set()
2600 writel(rx_itr, fep->hwp + FEC_RXIC0); in fec_enet_itr_coal_set()
2601 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_enet_itr_coal_set()
2602 writel(tx_itr, fep->hwp + FEC_TXIC1); in fec_enet_itr_coal_set()
2603 writel(rx_itr, fep->hwp + FEC_RXIC1); in fec_enet_itr_coal_set()
2604 writel(tx_itr, fep->hwp + FEC_TXIC2); in fec_enet_itr_coal_set()
2605 writel(rx_itr, fep->hwp + FEC_RXIC2); in fec_enet_itr_coal_set()
2612 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_coalesce() local
2614 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_get_coalesce()
2617 ec->rx_coalesce_usecs = fep->rx_time_itr; in fec_enet_get_coalesce()
2618 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; in fec_enet_get_coalesce()
2620 ec->tx_coalesce_usecs = fep->tx_time_itr; in fec_enet_get_coalesce()
2621 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; in fec_enet_get_coalesce()
2629 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_coalesce() local
2630 struct device *dev = &fep->pdev->dev; in fec_enet_set_coalesce()
2633 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) in fec_enet_set_coalesce()
2658 fep->rx_time_itr = ec->rx_coalesce_usecs; in fec_enet_set_coalesce()
2659 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; in fec_enet_set_coalesce()
2661 fep->tx_time_itr = ec->tx_coalesce_usecs; in fec_enet_set_coalesce()
2662 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; in fec_enet_set_coalesce()
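fec_enet_us_to_itr_clock() at line 2570 converts a coalescing timeout in microseconds into interrupt-timer ticks; the division by 64000 suggests the timer runs at itr_clk_rate/64, where itr_clk_rate is read from clk_ahb at probe time (line 3702 below). A standalone sketch of the same integer arithmetic with an assumed 66 MHz clock:

/* Illustrative only: mirrors the us -> ITR-tick conversion at line 2570,
 * with an assumed clock rate.
 */
#include <stdio.h>

static unsigned int us_to_itr_ticks(unsigned int us, unsigned long itr_clk_rate)
{
	return us * (itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	unsigned long rate = 66000000UL;	/* assumed AHB/ITR clock */

	printf("1000 us -> %u ticks\n", us_to_itr_ticks(1000, rate));
	printf("  20 us -> %u ticks\n", us_to_itr_ticks(20, rate));
	return 0;
}

fec_enet_itr_coal_set() then packs the tick count and frame count into FEC_TXICn/FEC_RXICn via FEC_ITR_ICTT()/FEC_ITR_ICFT() (lines 2591-2605).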
2686 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_get_tunable() local
2691 *(u32 *)data = fep->rx_copybreak; in fec_enet_get_tunable()
2705 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_tunable() local
2710 fep->rx_copybreak = *(u32 *)data; in fec_enet_set_tunable()
2723 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_wol() local
2725 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { in fec_enet_get_wol()
2727 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; in fec_enet_get_wol()
2736 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_wol() local
2738 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) in fec_enet_set_wol()
2746 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; in fec_enet_set_wol()
2747 if (fep->irq[0] > 0) in fec_enet_set_wol()
2748 enable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2750 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); in fec_enet_set_wol()
2751 if (fep->irq[0] > 0) in fec_enet_set_wol()
2752 disable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2786 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_ioctl() local
2795 if (fep->bufdesc_ex) { in fec_enet_ioctl()
2813 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_buffers() local
2821 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_free_buffers()
2822 rxq = fep->rx_queue[q]; in fec_enet_free_buffers()
2828 dma_unmap_single(&fep->pdev->dev, in fec_enet_free_buffers()
2830 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_free_buffers()
2838 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_free_buffers()
2839 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2852 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_queue() local
2856 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2857 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { in fec_enet_free_queue()
2858 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2859 dma_free_coherent(&fep->pdev->dev, in fec_enet_free_queue()
2865 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_free_queue()
2866 kfree(fep->rx_queue[i]); in fec_enet_free_queue()
2867 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2868 kfree(fep->tx_queue[i]); in fec_enet_free_queue()
2873 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_queue() local
2878 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_alloc_queue()
2885 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2887 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
2893 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, in fec_enet_alloc_queue()
2903 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_alloc_queue()
2904 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), in fec_enet_alloc_queue()
2906 if (!fep->rx_queue[i]) { in fec_enet_alloc_queue()
2911 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; in fec_enet_alloc_queue()
2912 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; in fec_enet_alloc_queue()
2924 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_rxq_buffers() local
2930 rxq = fep->rx_queue[queue]; in fec_enet_alloc_rxq_buffers()
2945 if (fep->bufdesc_ex) { in fec_enet_alloc_rxq_buffers()
2966 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_txq_buffers() local
2971 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2981 if (fep->bufdesc_ex) { in fec_enet_alloc_txq_buffers()
3002 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_buffers() local
3005 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_alloc_buffers()
3009 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_alloc_buffers()
3018 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_open() local
3022 ret = pm_runtime_resume_and_get(&fep->pdev->dev); in fec_enet_open()
3026 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_enet_open()
3064 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_open()
3067 napi_enable(&fep->napi); in fec_enet_open()
3071 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & in fec_enet_open()
3081 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_open()
3082 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_open()
3083 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_open()
3090 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_close() local
3095 napi_disable(&fep->napi); in fec_enet_close()
3102 if (fep->quirks & FEC_QUIRK_ERR006687) in fec_enet_close()
3108 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_close()
3109 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_close()
3110 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_close()
3131 struct fec_enet_private *fep = netdev_priv(ndev); in set_multicast_list() local
3138 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3140 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3144 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3146 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
3152 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3153 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3174 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3175 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3182 struct fec_enet_private *fep = netdev_priv(ndev); in fec_set_mac_address() local
3201 fep->hwp + FEC_ADDR_LOW); in fec_set_mac_address()
3203 fep->hwp + FEC_ADDR_HIGH); in fec_set_mac_address()
3218 struct fec_enet_private *fep = netdev_priv(dev); in fec_poll_controller() local
3221 if (fep->irq[i] > 0) { in fec_poll_controller()
3222 disable_irq(fep->irq[i]); in fec_poll_controller()
3223 fec_enet_interrupt(fep->irq[i], dev); in fec_poll_controller()
3224 enable_irq(fep->irq[i]); in fec_poll_controller()
3233 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_netdev_features() local
3241 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3243 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3250 struct fec_enet_private *fep = netdev_priv(netdev); in fec_set_features() local
3254 napi_disable(&fep->napi); in fec_set_features()
3261 napi_enable(&fep->napi); in fec_set_features()
3285 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_select_queue() local
3288 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) in fec_enet_select_queue()
3328 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_init() local
3333 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : in fec_enet_init()
3340 fep->rx_align = 0xf; in fec_enet_init()
3341 fep->tx_align = 0xf; in fec_enet_init()
3343 fep->rx_align = 0x3; in fec_enet_init()
3344 fep->tx_align = 0x3; in fec_enet_init()
3348 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); in fec_enet_init()
3350 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); in fec_enet_init()
3358 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; in fec_enet_init()
3361 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, in fec_enet_init()
3374 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_init()
3375 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; in fec_enet_init()
3384 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; in fec_enet_init()
3390 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_init()
3391 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; in fec_enet_init()
3400 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; in fec_enet_init()
3412 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); in fec_enet_init()
3413 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); in fec_enet_init()
3415 if (fep->quirks & FEC_QUIRK_HAS_VLAN) in fec_enet_init()
3419 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { in fec_enet_init()
3425 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_init()
3428 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_enet_init()
3429 fep->tx_align = 0; in fec_enet_init()
3430 fep->rx_align = 0x3f; in fec_enet_init()
3437 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) in fec_enet_init()
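fep->rx_align/fep->tx_align set in fec_enet_init() (lines 3340-3344, and 3429-3430 for AVB-capable parts) are alignment masks; the transmit paths AND them against the buffer address (lines 431, 507, 612, 674) and copy the data to an aligned bounce buffer when the test fails, while the receive path does the same check at lines 1362-1364 before reserving headroom. A tiny sketch of that test, using the mask values from the listing:

/* Illustrative: how a mask like fep->tx_align is interpreted.
 * 0xf  -> buffer must start on a 16-byte boundary
 * 0x3  -> 4-byte boundary
 * 0x3f -> 64-byte boundary (rx_align on AVB parts)
 * 0    -> no constraint (tx_align on AVB parts)
 */
#include <stdint.h>
#include <stdio.h>

static int is_aligned(const void *buf, uintptr_t align_mask)
{
	return ((uintptr_t)buf & align_mask) == 0;
}

int main(void)
{
	char pool[64];

	printf("pool+0 vs 0xf: %d\n", is_aligned(pool, 0xf));      /* depends on pool */
	printf("pool+1 vs 0x3: %d\n", is_aligned(pool + 1, 0x3));  /* 0: misaligned */
	printf("pool+1 vs 0x0: %d\n", is_aligned(pool + 1, 0x0));  /* 1: always passes */
	return 0;
}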
3559 static int fec_enet_init_stop_mode(struct fec_enet_private *fep, in fec_enet_init_stop_mode() argument
3573 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); in fec_enet_init_stop_mode()
3577 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); in fec_enet_init_stop_mode()
3578 if (IS_ERR(fep->stop_gpr.gpr)) { in fec_enet_init_stop_mode()
3579 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); in fec_enet_init_stop_mode()
3580 ret = PTR_ERR(fep->stop_gpr.gpr); in fec_enet_init_stop_mode()
3581 fep->stop_gpr.gpr = NULL; in fec_enet_init_stop_mode()
3585 fep->stop_gpr.reg = out_val[1]; in fec_enet_init_stop_mode()
3586 fep->stop_gpr.bit = out_val[2]; in fec_enet_init_stop_mode()
3597 struct fec_enet_private *fep; in fec_probe() local
3622 fep = netdev_priv(ndev); in fec_probe()
3629 fep->quirks = dev_info->quirks; in fec_probe()
3631 fep->netdev = ndev; in fec_probe()
3632 fep->num_rx_queues = num_rx_qs; in fec_probe()
3633 fep->num_tx_queues = num_tx_qs; in fec_probe()
3637 if (fep->quirks & FEC_QUIRK_HAS_GBIT) in fec_probe()
3638 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; in fec_probe()
3644 fep->hwp = devm_platform_ioremap_resource(pdev, 0); in fec_probe()
3645 if (IS_ERR(fep->hwp)) { in fec_probe()
3646 ret = PTR_ERR(fep->hwp); in fec_probe()
3650 fep->pdev = pdev; in fec_probe()
3651 fep->dev_id = dev_id++; in fec_probe()
3658 fep->quirks |= FEC_QUIRK_ERR006687; in fec_probe()
3661 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; in fec_probe()
3663 ret = fec_enet_init_stop_mode(fep, np); in fec_probe()
3677 fep->phy_node = phy_node; in fec_probe()
3683 fep->phy_interface = pdata->phy; in fec_probe()
3685 fep->phy_interface = PHY_INTERFACE_MODE_MII; in fec_probe()
3687 fep->phy_interface = interface; in fec_probe()
3690 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); in fec_probe()
3691 if (IS_ERR(fep->clk_ipg)) { in fec_probe()
3692 ret = PTR_ERR(fep->clk_ipg); in fec_probe()
3696 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); in fec_probe()
3697 if (IS_ERR(fep->clk_ahb)) { in fec_probe()
3698 ret = PTR_ERR(fep->clk_ahb); in fec_probe()
3702 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); in fec_probe()
3705 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); in fec_probe()
3706 if (IS_ERR(fep->clk_enet_out)) in fec_probe()
3707 fep->clk_enet_out = NULL; in fec_probe()
3709 fep->ptp_clk_on = false; in fec_probe()
3710 mutex_init(&fep->ptp_clk_mutex); in fec_probe()
3713 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); in fec_probe()
3714 if (IS_ERR(fep->clk_ref)) in fec_probe()
3715 fep->clk_ref = NULL; in fec_probe()
3717 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; in fec_probe()
3718 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); in fec_probe()
3719 if (IS_ERR(fep->clk_ptp)) { in fec_probe()
3720 fep->clk_ptp = NULL; in fec_probe()
3721 fep->bufdesc_ex = false; in fec_probe()
3728 ret = clk_prepare_enable(fep->clk_ipg); in fec_probe()
3731 ret = clk_prepare_enable(fep->clk_ahb); in fec_probe()
3735 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); in fec_probe()
3736 if (!IS_ERR(fep->reg_phy)) { in fec_probe()
3737 ret = regulator_enable(fep->reg_phy); in fec_probe()
3744 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { in fec_probe()
3748 fep->reg_phy = NULL; in fec_probe()
3762 if (fep->bufdesc_ex) in fec_probe()
3783 fep->irq[i] = irq; in fec_probe()
3801 device_init_wakeup(&ndev->dev, fep->wol_flag & in fec_probe()
3804 if (fep->bufdesc_ex && fep->ptp_clock) in fec_probe()
3805 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); in fec_probe()
3807 fep->rx_copybreak = COPYBREAK_DEFAULT; in fec_probe()
3808 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); in fec_probe()
3816 fec_enet_mii_remove(fep); in fec_probe()
3824 if (fep->reg_phy) in fec_probe()
3825 regulator_disable(fep->reg_phy); in fec_probe()
3827 clk_disable_unprepare(fep->clk_ahb); in fec_probe()
3829 clk_disable_unprepare(fep->clk_ipg); in fec_probe()
3849 struct fec_enet_private *fep = netdev_priv(ndev); in fec_drv_remove() local
3857 cancel_work_sync(&fep->tx_timeout_work); in fec_drv_remove()
3860 fec_enet_mii_remove(fep); in fec_drv_remove()
3861 if (fep->reg_phy) in fec_drv_remove()
3862 regulator_disable(fep->reg_phy); in fec_drv_remove()
3866 of_node_put(fep->phy_node); in fec_drv_remove()
3868 clk_disable_unprepare(fep->clk_ahb); in fec_drv_remove()
3869 clk_disable_unprepare(fep->clk_ipg); in fec_drv_remove()
3880 struct fec_enet_private *fep = netdev_priv(ndev); in fec_suspend() local
3884 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) in fec_suspend()
3885 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; in fec_suspend()
3887 napi_disable(&fep->napi); in fec_suspend()
3893 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3894 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_suspend()
3898 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3899 regulator_disable(fep->reg_phy); in fec_suspend()
3904 if (fep->clk_enet_out || fep->reg_phy) in fec_suspend()
3905 fep->link = 0; in fec_suspend()
3913 struct fec_enet_private *fep = netdev_priv(ndev); in fec_resume() local
3917 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { in fec_resume()
3918 ret = regulator_enable(fep->reg_phy); in fec_resume()
3930 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { in fec_resume()
3931 fec_enet_stop_mode(fep, false); in fec_resume()
3933 val = readl(fep->hwp + FEC_ECNTRL); in fec_resume()
3935 writel(val, fep->hwp + FEC_ECNTRL); in fec_resume()
3936 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; in fec_resume()
3938 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_resume()
3944 napi_enable(&fep->napi); in fec_resume()
3952 if (fep->reg_phy) in fec_resume()
3953 regulator_disable(fep->reg_phy); in fec_resume()
3960 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_suspend() local
3962 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_suspend()
3963 clk_disable_unprepare(fep->clk_ipg); in fec_runtime_suspend()
3971 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_resume() local
3974 ret = clk_prepare_enable(fep->clk_ahb); in fec_runtime_resume()
3977 ret = clk_prepare_enable(fep->clk_ipg); in fec_runtime_resume()
3984 clk_disable_unprepare(fep->clk_ahb); in fec_runtime_resume()