Lines Matching refs:vptr
86 static void velocity_set_power_state(struct velocity_info *vptr, char state) in velocity_set_power_state() argument
88 void *addr = vptr->mac_regs; in velocity_set_power_state()
90 if (vptr->pdev) in velocity_set_power_state()
91 pci_set_power_state(vptr->pdev, state); in velocity_set_power_state()
504 static void velocity_init_cam_filter(struct velocity_info *vptr) in velocity_init_cam_filter() argument
506 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_cam_filter()
514 memset(vptr->vCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
515 memset(vptr->mCAMmask, 0, sizeof(u8) * 8); in velocity_init_cam_filter()
516 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
517 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_init_cam_filter()
520 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { in velocity_init_cam_filter()
522 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); in velocity_init_cam_filter()
526 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); in velocity_init_cam_filter()
532 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_add_vid() local
534 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
535 set_bit(vid, vptr->active_vlans); in velocity_vlan_rx_add_vid()
536 velocity_init_cam_filter(vptr); in velocity_vlan_rx_add_vid()
537 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_add_vid()
544 struct velocity_info *vptr = netdev_priv(dev); in velocity_vlan_rx_kill_vid() local
546 spin_lock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
547 clear_bit(vid, vptr->active_vlans); in velocity_vlan_rx_kill_vid()
548 velocity_init_cam_filter(vptr); in velocity_vlan_rx_kill_vid()
549 spin_unlock_irq(&vptr->lock); in velocity_vlan_rx_kill_vid()
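Taken together, the velocity_init_cam_filter and VLAN add/kill fragments show the whole CAM update pattern: every change to active_vlans clears both masks and rebuilds the VLAN mask one CAM entry per active VID, under the device lock. A runnable userspace model of just the bitmask arithmetic (CAM_BYTES and rebuild_vlan_mask are illustrative names, not from the driver):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CAM_BYTES 8   /* vCAMmask/mCAMmask are u8[8] (lines 514-515) */

/* Model of the loop at lines 520-522: the i-th active VLAN occupies CAM
 * entry i, which maps to bit (i % 8) of byte (i / 8) of the enable mask;
 * the driver derives i by iterating the set bits of active_vlans. */
static void rebuild_vlan_mask(uint8_t mask[CAM_BYTES], int n_active)
{
    memset(mask, 0, CAM_BYTES);            /* clear first, as at line 514 */
    for (int i = 0; i < n_active; i++)
        mask[i / 8] |= 0x1 << (i % 8);
}

int main(void)
{
    uint8_t mask[CAM_BYTES];

    rebuild_vlan_mask(mask, 3);            /* three active VLANs */
    printf("mask[0] = 0x%02x\n", mask[0]); /* 0x07: CAM entries 0..2 on */
    return 0;
}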
553 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) in velocity_init_rx_ring_indexes() argument
555 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; in velocity_init_rx_ring_indexes()
565 static void velocity_rx_reset(struct velocity_info *vptr) in velocity_rx_reset() argument
568 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_rx_reset()
571 velocity_init_rx_ring_indexes(vptr); in velocity_rx_reset()
576 for (i = 0; i < vptr->options.numrx; ++i) in velocity_rx_reset()
577 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; in velocity_rx_reset()
579 writew(vptr->options.numrx, &regs->RBRDU); in velocity_rx_reset()
580 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_rx_reset()
582 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_rx_reset()
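velocity_rx_reset returns every receive descriptor to the NIC by setting its ownership bit, then reprograms the ring registers. A minimal model of the ownership pass (struct rx_desc and the flag value here are reduced stand-ins, not the hardware layout):

#include <stdio.h>
#include <stdint.h>

#define OWNED_BY_NIC 0x80000000u   /* illustrative ownership flag in rdesc0.len */
#define NUM_RX 64                  /* stand-in for vptr->options.numrx */

struct rx_desc { uint32_t len; };  /* reduced stand-in for the descriptor */

int main(void)
{
    struct rx_desc ring[NUM_RX] = { { 0 } };

    /* Lines 576-577: flip every descriptor back to NIC ownership. */
    for (int i = 0; i < NUM_RX; i++)
        ring[i].len |= OWNED_BY_NIC;

    /* Lines 579-582 then tell the hardware: NUM_RX descriptors available
     * (RBRDU), the ring's DMA base (RDBaseLo), ring size - 1 (RDCSize). */
    printf("all %d descriptors owned by NIC\n", NUM_RX);
    return 0;
}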
593 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) in velocity_get_opt_media_mode() argument
597 switch (vptr->options.spd_dpx) { in velocity_get_opt_media_mode()
617 vptr->mii_status = status; in velocity_get_opt_media_mode()
794 static void set_mii_flow_control(struct velocity_info *vptr) in set_mii_flow_control() argument
797 switch (vptr->options.flow_cntl) { in set_mii_flow_control()
799 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
800 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
804 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
810 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
814 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); in set_mii_flow_control()
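set_mii_flow_control maps the driver's four flow-control settings onto the two MII advertisement bits, ADVERTISE_PAUSE_CAP and ADVERTISE_PAUSE_ASYM. The case labels are not visible in this listing, so the pairing below is inferred from the order the register writes appear (a sketch, not the driver's source):

#include <stdio.h>

/* Option values assumed from the order of the writes above. */
enum flow { FLOW_CNTL_TX, FLOW_CNTL_RX, FLOW_CNTL_TX_RX, FLOW_CNTL_DISABLE };

/* Returns the PAUSE_CAP/PAUSE_ASYM advertisement bits as set at:
 * lines 799-800 (cap off, asym on), 804-805 (both on),
 * lines 809-810 (cap on, asym off), 814-815 (both off). */
static void advertise_bits(enum flow f, int *cap, int *asym)
{
    switch (f) {
    case FLOW_CNTL_TX:      *cap = 0; *asym = 1; break;
    case FLOW_CNTL_RX:      *cap = 1; *asym = 1; break;
    case FLOW_CNTL_TX_RX:   *cap = 1; *asym = 0; break;
    default:                *cap = 0; *asym = 0; break;
    }
}

int main(void)
{
    int cap, asym;

    advertise_bits(FLOW_CNTL_TX_RX, &cap, &asym);
    printf("PAUSE_CAP=%d PAUSE_ASYM=%d\n", cap, asym);
    return 0;
}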
828 static void mii_set_auto_on(struct velocity_info *vptr) in mii_set_auto_on() argument
830 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) in mii_set_auto_on()
831 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
833 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); in mii_set_auto_on()
879 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) in velocity_set_media_mode() argument
881 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_media_mode()
883 vptr->mii_status = mii_check_media_mode(vptr->mac_regs); in velocity_set_media_mode()
886 set_mii_flow_control(vptr); in velocity_set_media_mode()
888 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_media_mode()
889 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_media_mode()
895 netdev_info(vptr->netdev, "Velocity is in AUTO mode\n"); in velocity_set_media_mode()
899 …_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); in velocity_set_media_mode()
900 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_media_mode()
901 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); in velocity_set_media_mode()
904 mii_set_auto_on(vptr); in velocity_set_media_mode()
930 netdev_info(vptr->netdev, in velocity_set_media_mode()
932 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
936 netdev_info(vptr->netdev, in velocity_set_media_mode()
939 if (vptr->rev_id < REV_ID_VT3216_A0) in velocity_set_media_mode()
943 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); in velocity_set_media_mode()
949 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); in velocity_set_media_mode()
957 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); in velocity_set_media_mode()
970 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); in velocity_set_media_mode()
972 mii_set_auto_on(vptr); in velocity_set_media_mode()
988 static void velocity_print_link_status(struct velocity_info *vptr) in velocity_print_link_status() argument
994 if (vptr->mii_status & VELOCITY_LINK_FAIL) { in velocity_print_link_status()
995 netdev_notice(vptr->netdev, "failed to detect cable link\n"); in velocity_print_link_status()
999 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_print_link_status()
1002 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_print_link_status()
1004 else if (vptr->mii_status & VELOCITY_SPEED_100) in velocity_print_link_status()
1009 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_print_link_status()
1016 switch (vptr->options.spd_dpx) { in velocity_print_link_status()
1043 netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n", in velocity_print_link_status()
1054 static void enable_flow_control_ability(struct velocity_info *vptr) in enable_flow_control_ability() argument
1057 struct mac_regs __iomem *regs = vptr->mac_regs; in enable_flow_control_ability()
1059 switch (vptr->options.flow_cntl) { in enable_flow_control_ability()
1106 static int velocity_soft_reset(struct velocity_info *vptr) in velocity_soft_reset() argument
1108 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_soft_reset()
1138 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_multi() local
1139 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_multi()
1148 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || in velocity_set_multi()
1154 int offset = MCAM_SIZE - vptr->multicast_limit; in velocity_set_multi()
1155 mac_get_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1160 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); in velocity_set_multi()
1164 mac_set_cam_mask(regs, vptr->mCAMmask); in velocity_set_multi()
1185 static void mii_init(struct velocity_info *vptr, u32 mii_status) in mii_init() argument
1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) { in mii_init()
1192 MII_ADVERTISE, vptr->mac_regs); in mii_init()
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1195 vptr->mac_regs); in mii_init()
1198 vptr->mac_regs); in mii_init()
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1205 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1211 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1212 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1214 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1218 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); in mii_init()
1225 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1231 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in mii_init()
1232 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1234 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); in mii_init()
1242 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); in mii_init()
1246 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); in mii_init()
1251 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); in mii_init()
1254 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); in mii_init()
1265 static void setup_queue_timers(struct velocity_info *vptr) in setup_queue_timers() argument
1268 if (vptr->rev_id >= REV_ID_VT3216_A0) { in setup_queue_timers()
1272 if (vptr->mii_status & (VELOCITY_SPEED_1000 | in setup_queue_timers()
1274 txqueue_timer = vptr->options.txqueue_timer; in setup_queue_timers()
1275 rxqueue_timer = vptr->options.rxqueue_timer; in setup_queue_timers()
1278 writeb(txqueue_timer, &vptr->mac_regs->TQETMR); in setup_queue_timers()
1279 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); in setup_queue_timers()
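setup_queue_timers arms the TX/RX queue-empty timers only on VT3216_A0 or newer silicon; the configured values are used only when the speed test at line 1272 passes (its mask is truncated in this listing), otherwise zero is written to disable the timers. A small model, assuming that nesting:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    bool new_silicon = true;  /* vptr->rev_id >= REV_ID_VT3216_A0 (line 1268) */
    bool fast_link = true;    /* speed test at line 1272; mask truncated here */
    unsigned char tx_opt = 0x59, rx_opt = 0x20;   /* illustrative settings */

    if (new_silicon) {
        unsigned char txqueue_timer = 0, rxqueue_timer = 0;

        if (fast_link) {                  /* lines 1274-1275 */
            txqueue_timer = tx_opt;
            rxqueue_timer = rx_opt;
        }
        /* lines 1278-1279: write the values to TQETMR/RQETMR; zero
         * disables the queue-empty timers on slow or unknown links. */
        printf("tx=%u rx=%u\n", txqueue_timer, rxqueue_timer);
    }
    return 0;
}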
1290 static void setup_adaptive_interrupts(struct velocity_info *vptr) in setup_adaptive_interrupts() argument
1292 struct mac_regs __iomem *regs = vptr->mac_regs; in setup_adaptive_interrupts()
1293 u16 tx_intsup = vptr->options.tx_intsup; in setup_adaptive_interrupts()
1294 u16 rx_intsup = vptr->options.rx_intsup; in setup_adaptive_interrupts()
1297 vptr->int_mask = INT_MASK_DEF; in setup_adaptive_interrupts()
1302 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | in setup_adaptive_interrupts()
1311 vptr->int_mask &= ~ISR_PRXI; in setup_adaptive_interrupts()
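setup_adaptive_interrupts starts from the default mask and strips per-packet completion interrupts when suppression thresholds are configured: the TX sources at line 1302 for tx_intsup, ISR_PRXI at line 1311 for rx_intsup. A small model of the mask arithmetic (the bit values are illustrative, not the hardware encoding):

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit positions, not the hardware's actual encoding. */
#define ISR_PTXI     (1u << 0)   /* per-packet TX completion interrupt */
#define ISR_PRXI     (1u << 4)   /* per-packet RX completion interrupt */
#define INT_MASK_DEF 0xffffffffu

int main(void)
{
    uint32_t int_mask = INT_MASK_DEF;        /* line 1297 */
    uint16_t tx_intsup = 8, rx_intsup = 8;   /* coalescing thresholds */

    if (tx_intsup)               /* suppress per-packet TX completions; */
        int_mask &= ~ISR_PTXI;   /* line 1302 also clears ISR_PTX0I..  */
    if (rx_intsup)               /* suppress per-packet RX completions */
        int_mask &= ~ISR_PRXI;   /* line 1311 */

    printf("int_mask = 0x%08x\n", int_mask);
    return 0;
}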
1328 static void velocity_init_registers(struct velocity_info *vptr, in velocity_init_registers() argument
1331 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_init_registers()
1332 struct net_device *netdev = vptr->netdev; in velocity_init_registers()
1346 velocity_rx_reset(vptr); in velocity_init_registers()
1350 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1352 velocity_print_link_status(vptr); in velocity_init_registers()
1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1357 enable_flow_control_ability(vptr); in velocity_init_registers()
1371 velocity_soft_reset(vptr); in velocity_init_registers()
1374 if (!vptr->no_eeprom) { in velocity_init_registers()
1384 mac_set_rx_thresh(regs, vptr->options.rx_thresh); in velocity_init_registers()
1385 mac_set_dma_length(regs, vptr->options.DMA_length); in velocity_init_registers()
1396 velocity_init_cam_filter(vptr); in velocity_init_registers()
1408 setup_adaptive_interrupts(vptr); in velocity_init_registers()
1410 writel(vptr->rx.pool_dma, &regs->RDBaseLo); in velocity_init_registers()
1411 writew(vptr->options.numrx - 1, &regs->RDCSize); in velocity_init_registers()
1415 writew(vptr->options.numtx - 1, &regs->TDCSize); in velocity_init_registers()
1417 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_registers()
1418 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); in velocity_init_registers()
1422 init_flow_control_register(vptr); in velocity_init_registers()
1427 mii_status = velocity_get_opt_media_mode(vptr); in velocity_init_registers()
1430 mii_init(vptr, mii_status); in velocity_init_registers()
1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { in velocity_init_registers()
1433 velocity_print_link_status(vptr); in velocity_init_registers()
1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) in velocity_init_registers()
1438 enable_flow_control_ability(vptr); in velocity_init_registers()
1440 mac_write_int_mask(vptr->int_mask, regs); in velocity_init_registers()
1446 static void velocity_give_many_rx_descs(struct velocity_info *vptr) in velocity_give_many_rx_descs() argument
1448 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_give_many_rx_descs()
1455 if (vptr->rx.filled < 4) in velocity_give_many_rx_descs()
1460 unusable = vptr->rx.filled & 0x0003; in velocity_give_many_rx_descs()
1461 dirty = vptr->rx.dirty - unusable; in velocity_give_many_rx_descs()
1462 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { in velocity_give_many_rx_descs()
1463 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; in velocity_give_many_rx_descs()
1464 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; in velocity_give_many_rx_descs()
1467 writew(vptr->rx.filled & 0xfffc, ®s->RBRDU); in velocity_give_many_rx_descs()
1468 vptr->rx.filled = unusable; in velocity_give_many_rx_descs()
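velocity_give_many_rx_descs hands descriptors back to the NIC only in multiples of four; the & 0x0003 / & 0xfffc arithmetic keeps the remainder behind as "unusable" for the next pass. A runnable model of that arithmetic:

#include <stdio.h>

#define NUM_RX 64   /* stand-in for vptr->options.numrx */

int main(void)
{
    int filled = 11;                 /* freshly refilled descriptors */
    int dirty  = 20;                 /* next slot the NIC will fill */

    if (filled < 4)                  /* line 1455: too few to hand back */
        return 0;

    int unusable = filled & 0x0003;  /* remainder kept for the next pass */
    int idx = dirty - unusable;      /* line 1461 */

    /* Lines 1462-1464: walk backwards over filled & ~3 descriptors,
     * flipping each one's ownership bit; the ring wraps at zero. */
    for (int avail = filled & 0xfffc; avail; avail--) {
        idx = (idx > 0) ? idx - 1 : NUM_RX - 1;
        /* ring[idx].rdesc0.len |= OWNED_BY_NIC; */
    }

    /* Line 1467 writes filled & 0xfffc to RBRDU; line 1468 carries the
     * remainder over: filled becomes unusable. */
    printf("handed back %d, carried over %d\n", filled & 0xfffc, unusable);
    return 0;
}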
1478 static int velocity_init_dma_rings(struct velocity_info *vptr) in velocity_init_dma_rings() argument
1480 struct velocity_opt *opt = &vptr->options; in velocity_init_dma_rings()
1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + in velocity_init_dma_rings()
1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", in velocity_init_dma_rings()
1497 vptr->netdev->name); in velocity_init_dma_rings()
1501 vptr->rx.ring = pool; in velocity_init_dma_rings()
1502 vptr->rx.pool_dma = pool_dma; in velocity_init_dma_rings()
1507 for (i = 0; i < vptr->tx.numq; i++) { in velocity_init_dma_rings()
1508 vptr->tx.rings[i] = pool; in velocity_init_dma_rings()
1509 vptr->tx.pool_dma[i] = pool_dma; in velocity_init_dma_rings()
1517 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) in velocity_set_rxbufsize() argument
1519 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; in velocity_set_rxbufsize()
1532 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) in velocity_alloc_rx_buf() argument
1534 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_alloc_rx_buf()
1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_alloc_rx_buf()
1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); in velocity_alloc_rx_buf()
1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, in velocity_alloc_rx_buf()
1548 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_alloc_rx_buf()
1555 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; in velocity_alloc_rx_buf()
1562 static int velocity_rx_refill(struct velocity_info *vptr) in velocity_rx_refill() argument
1564 int dirty = vptr->rx.dirty, done = 0; in velocity_rx_refill()
1567 struct rx_desc *rd = vptr->rx.ring + dirty; in velocity_rx_refill()
1573 if (!vptr->rx.info[dirty].skb) { in velocity_rx_refill()
1574 if (velocity_alloc_rx_buf(vptr, dirty) < 0) in velocity_rx_refill()
1578 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; in velocity_rx_refill()
1579 } while (dirty != vptr->rx.curr); in velocity_rx_refill()
1582 vptr->rx.dirty = dirty; in velocity_rx_refill()
1583 vptr->rx.filled += done; in velocity_rx_refill()
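velocity_rx_refill walks from rx.dirty toward rx.curr, allocating a buffer for every slot whose skb was consumed, wrapping at numrx and stopping early on allocation failure. A userspace model of the walk (alloc_ok and has_skb stand in for velocity_alloc_rx_buf and the rx.info skb pointers; the descriptor-ownership early-out is omitted):

#include <stdio.h>
#include <stdbool.h>

#define NUM_RX 8   /* stand-in for vptr->options.numrx */

static bool has_skb[NUM_RX];   /* stand-in for vptr->rx.info[i].skb */

static bool alloc_ok(int idx)  /* stand-in for velocity_alloc_rx_buf */
{
    has_skb[idx] = true;
    return true;
}

int main(void)
{
    int dirty = 5, curr = 3, done = 0;   /* ring state before the refill */

    do {
        if (!has_skb[dirty]) {                         /* line 1573 */
            if (!alloc_ok(dirty))                      /* stop on failure */
                break;
            done++;
        }
        dirty = (dirty < NUM_RX - 1) ? dirty + 1 : 0;  /* wrap, line 1578 */
    } while (dirty != curr);                           /* line 1579 */

    /* Lines 1582-1583: commit the new dirty index and bump rx.filled. */
    printf("refilled %d slots, new dirty=%d\n", done, dirty);
    return 0;
}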
1596 static void velocity_free_rd_ring(struct velocity_info *vptr) in velocity_free_rd_ring() argument
1600 if (vptr->rx.info == NULL) in velocity_free_rd_ring()
1603 for (i = 0; i < vptr->options.numrx; i++) { in velocity_free_rd_ring()
1604 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); in velocity_free_rd_ring()
1605 struct rx_desc *rd = vptr->rx.ring + i; in velocity_free_rd_ring()
1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_free_rd_ring()
1619 kfree(vptr->rx.info); in velocity_free_rd_ring()
1620 vptr->rx.info = NULL; in velocity_free_rd_ring()
1630 static int velocity_init_rd_ring(struct velocity_info *vptr) in velocity_init_rd_ring() argument
1634 vptr->rx.info = kcalloc(vptr->options.numrx, in velocity_init_rd_ring()
1636 if (!vptr->rx.info) in velocity_init_rd_ring()
1639 velocity_init_rx_ring_indexes(vptr); in velocity_init_rd_ring()
1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) { in velocity_init_rd_ring()
1642 netdev_err(vptr->netdev, "failed to allocate RX buffer\n"); in velocity_init_rd_ring()
1643 velocity_free_rd_ring(vptr); in velocity_init_rd_ring()
1660 static int velocity_init_td_ring(struct velocity_info *vptr) in velocity_init_td_ring() argument
1665 for (j = 0; j < vptr->tx.numq; j++) { in velocity_init_td_ring()
1667 vptr->tx.infos[j] = kcalloc(vptr->options.numtx, in velocity_init_td_ring()
1670 if (!vptr->tx.infos[j]) { in velocity_init_td_ring()
1672 kfree(vptr->tx.infos[j]); in velocity_init_td_ring()
1676 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; in velocity_init_td_ring()
1687 static void velocity_free_dma_rings(struct velocity_info *vptr) in velocity_free_dma_rings() argument
1689 const int size = vptr->options.numrx * sizeof(struct rx_desc) + in velocity_free_dma_rings()
1690 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; in velocity_free_dma_rings()
1692 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); in velocity_free_dma_rings()
1695 static int velocity_init_rings(struct velocity_info *vptr, int mtu) in velocity_init_rings() argument
1699 velocity_set_rxbufsize(vptr, mtu); in velocity_init_rings()
1701 ret = velocity_init_dma_rings(vptr); in velocity_init_rings()
1705 ret = velocity_init_rd_ring(vptr); in velocity_init_rings()
1709 ret = velocity_init_td_ring(vptr); in velocity_init_rings()
1716 velocity_free_rd_ring(vptr); in velocity_init_rings()
1718 velocity_free_dma_rings(vptr); in velocity_init_rings()
1731 static void velocity_free_tx_buf(struct velocity_info *vptr, in velocity_free_tx_buf() argument
1748 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], in velocity_free_tx_buf()
1758 static void velocity_free_td_ring_entry(struct velocity_info *vptr, in velocity_free_td_ring_entry() argument
1761 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); in velocity_free_td_ring_entry()
1770 dma_unmap_single(vptr->dev, td_info->skb_dma[i], in velocity_free_td_ring_entry()
1787 static void velocity_free_td_ring(struct velocity_info *vptr) in velocity_free_td_ring() argument
1791 for (j = 0; j < vptr->tx.numq; j++) { in velocity_free_td_ring()
1792 if (vptr->tx.infos[j] == NULL) in velocity_free_td_ring()
1794 for (i = 0; i < vptr->options.numtx; i++) in velocity_free_td_ring()
1795 velocity_free_td_ring_entry(vptr, j, i); in velocity_free_td_ring()
1797 kfree(vptr->tx.infos[j]); in velocity_free_td_ring()
1798 vptr->tx.infos[j] = NULL; in velocity_free_td_ring()
1802 static void velocity_free_rings(struct velocity_info *vptr) in velocity_free_rings() argument
1804 velocity_free_td_ring(vptr); in velocity_free_rings()
1805 velocity_free_rd_ring(vptr); in velocity_free_rings()
1806 velocity_free_dma_rings(vptr); in velocity_free_rings()
1820 static void velocity_error(struct velocity_info *vptr, int status) in velocity_error() argument
1824 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1826 netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n", in velocity_error()
1830 netif_stop_queue(vptr->netdev); in velocity_error()
1837 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_error()
1840 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_error()
1841 vptr->mii_status = check_connection_type(regs); in velocity_error()
1848 if (vptr->rev_id < REV_ID_VT3216_A0) { in velocity_error()
1849 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) in velocity_error()
1857 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) in velocity_error()
1862 setup_queue_timers(vptr); in velocity_error()
1870 vptr->mii_status &= ~VELOCITY_LINK_FAIL; in velocity_error()
1871 netif_carrier_on(vptr->netdev); in velocity_error()
1873 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_error()
1874 netif_carrier_off(vptr->netdev); in velocity_error()
1877 velocity_print_link_status(vptr); in velocity_error()
1878 enable_flow_control_ability(vptr); in velocity_error()
1887 if (vptr->mii_status & VELOCITY_LINK_FAIL) in velocity_error()
1888 netif_stop_queue(vptr->netdev); in velocity_error()
1890 netif_wake_queue(vptr->netdev); in velocity_error()
1894 velocity_update_hw_mibs(vptr); in velocity_error()
1896 mac_rx_queue_wake(vptr->mac_regs); in velocity_error()
1907 static int velocity_tx_srv(struct velocity_info *vptr) in velocity_tx_srv() argument
1915 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_tx_srv()
1917 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { in velocity_tx_srv()
1918 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; in velocity_tx_srv()
1919 idx = (idx + 1) % vptr->options.numtx) { in velocity_tx_srv()
1924 td = &(vptr->tx.rings[qnum][idx]); in velocity_tx_srv()
1925 tdinfo = &(vptr->tx.infos[qnum][idx]); in velocity_tx_srv()
1948 velocity_free_tx_buf(vptr, tdinfo, td); in velocity_tx_srv()
1949 vptr->tx.used[qnum]--; in velocity_tx_srv()
1951 vptr->tx.tail[qnum] = idx; in velocity_tx_srv()
1953 if (AVAIL_TD(vptr, qnum) < 1) in velocity_tx_srv()
1960 if (netif_queue_stopped(vptr->netdev) && (full == 0) && in velocity_tx_srv()
1961 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { in velocity_tx_srv()
1962 netif_wake_queue(vptr->netdev); in velocity_tx_srv()
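velocity_tx_srv reclaims completed transmit descriptors per queue: starting at tail, it frees entries while used > 0, advancing modulo numtx and stopping at the first descriptor the NIC still owns, then wakes the queue if space and link allow. A model of the reclaim walk (owned_by_nic stands in for the descriptor ownership test):

#include <stdio.h>
#include <stdbool.h>

#define NUM_TX 8   /* stand-in for vptr->options.numtx */

static bool owned_by_nic[NUM_TX];  /* stand-in for the tdesc0 ownership test */

int main(void)
{
    int tail = 2, used = 4;        /* queue state: 4 packets outstanding */
    owned_by_nic[4] = true;        /* descriptor 4 still being transmitted */

    int idx = tail;
    for (; used > 0; idx = (idx + 1) % NUM_TX) {   /* lines 1918-1919 */
        if (owned_by_nic[idx])     /* NIC not done with this one: stop */
            break;
        /* velocity_free_tx_buf() would unmap and free the skb here
         * (line 1948) and the stats would be updated. */
        used--;                    /* line 1949 */
    }
    tail = idx;                    /* line 1951 */

    printf("tail=%d used=%d\n", tail, used);
    return 0;
}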
2003 struct velocity_info *vptr) in velocity_rx_copy() argument
2009 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); in velocity_rx_copy()
2030 static inline void velocity_iph_realign(struct velocity_info *vptr, in velocity_iph_realign() argument
2033 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { in velocity_iph_realign()
2047 static int velocity_receive_frame(struct velocity_info *vptr, int idx) in velocity_receive_frame() argument
2049 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_receive_frame()
2050 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); in velocity_receive_frame()
2051 struct rx_desc *rd = &(vptr->rx.ring[idx]); in velocity_receive_frame()
2057 netdev_err(vptr->netdev, "received frame spans multiple RDs\n"); in velocity_receive_frame()
2067 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2068 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2072 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { in velocity_receive_frame()
2073 velocity_iph_realign(vptr, skb, pkt_len); in velocity_receive_frame()
2075 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, in velocity_receive_frame()
2078 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, in velocity_receive_frame()
2079 vptr->rx.buf_sz, DMA_FROM_DEVICE); in velocity_receive_frame()
2083 skb->protocol = eth_type_trans(skb, vptr->netdev); in velocity_receive_frame()
2107 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) in velocity_rx_srv() argument
2109 struct net_device_stats *stats = &vptr->netdev->stats; in velocity_rx_srv()
2110 int rd_curr = vptr->rx.curr; in velocity_rx_srv()
2114 struct rx_desc *rd = vptr->rx.ring + rd_curr; in velocity_rx_srv()
2116 if (!vptr->rx.info[rd_curr].skb) in velocity_rx_srv()
2128 if (velocity_receive_frame(vptr, rd_curr) < 0) in velocity_rx_srv()
2142 if (rd_curr >= vptr->options.numrx) in velocity_rx_srv()
2147 vptr->rx.curr = rd_curr; in velocity_rx_srv()
2149 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) in velocity_rx_srv()
2150 velocity_give_many_rx_descs(vptr); in velocity_rx_srv()
2158 struct velocity_info *vptr = container_of(napi, in velocity_poll() local
2167 rx_done = velocity_rx_srv(vptr, budget); in velocity_poll()
2168 spin_lock_irqsave(&vptr->lock, flags); in velocity_poll()
2169 velocity_tx_srv(vptr); in velocity_poll()
2173 mac_enable_int(vptr->mac_regs); in velocity_poll()
2175 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_poll()
2193 struct velocity_info *vptr = netdev_priv(dev); in velocity_intr() local
2196 spin_lock(&vptr->lock); in velocity_intr()
2197 isr_status = mac_read_isr(vptr->mac_regs); in velocity_intr()
2201 spin_unlock(&vptr->lock); in velocity_intr()
2206 mac_write_isr(vptr->mac_regs, isr_status); in velocity_intr()
2208 if (likely(napi_schedule_prep(&vptr->napi))) { in velocity_intr()
2209 mac_disable_int(vptr->mac_regs); in velocity_intr()
2210 __napi_schedule(&vptr->napi); in velocity_intr()
2214 velocity_error(vptr, isr_status); in velocity_intr()
2216 spin_unlock(&vptr->lock); in velocity_intr()
2233 struct velocity_info *vptr = netdev_priv(dev); in velocity_open() local
2236 ret = velocity_init_rings(vptr, dev->mtu); in velocity_open()
2241 velocity_set_power_state(vptr, PCI_D0); in velocity_open()
2243 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_open()
2249 velocity_set_power_state(vptr, PCI_D3hot); in velocity_open()
2250 velocity_free_rings(vptr); in velocity_open()
2254 velocity_give_many_rx_descs(vptr); in velocity_open()
2256 mac_enable_int(vptr->mac_regs); in velocity_open()
2258 napi_enable(&vptr->napi); in velocity_open()
2259 vptr->flags |= VELOCITY_FLAGS_OPENED; in velocity_open()
2271 static void velocity_shutdown(struct velocity_info *vptr) in velocity_shutdown() argument
2273 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_shutdown()
2293 struct velocity_info *vptr = netdev_priv(dev); in velocity_change_mtu() local
2314 tmp_vptr->pdev = vptr->pdev; in velocity_change_mtu()
2315 tmp_vptr->dev = vptr->dev; in velocity_change_mtu()
2316 tmp_vptr->options = vptr->options; in velocity_change_mtu()
2317 tmp_vptr->tx.numq = vptr->tx.numq; in velocity_change_mtu()
2323 napi_disable(&vptr->napi); in velocity_change_mtu()
2325 spin_lock_irqsave(&vptr->lock, flags); in velocity_change_mtu()
2328 velocity_shutdown(vptr); in velocity_change_mtu()
2330 rx = vptr->rx; in velocity_change_mtu()
2331 tx = vptr->tx; in velocity_change_mtu()
2333 vptr->rx = tmp_vptr->rx; in velocity_change_mtu()
2334 vptr->tx = tmp_vptr->tx; in velocity_change_mtu()
2341 velocity_init_registers(vptr, VELOCITY_INIT_COLD); in velocity_change_mtu()
2343 velocity_give_many_rx_descs(vptr); in velocity_change_mtu()
2345 napi_enable(&vptr->napi); in velocity_change_mtu()
2347 mac_enable_int(vptr->mac_regs); in velocity_change_mtu()
2350 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_change_mtu()
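velocity_change_mtu builds a second, fully sized set of rings in a scratch private struct, then swaps the rx/tx ring state into the live device under the lock while the hardware is shut down; the displaced rings are presumably handed to the scratch struct so the common free path releases them (that hand-back is not visible in these fragments). A reduced sketch of the swap:

#include <stdio.h>

/* Reduced stand-ins for the driver's per-device rx/tx ring state. */
struct rx_state { int buf_sz; void *ring; };
struct tx_state { int numq; void *rings; };
struct priv     { struct rx_state rx; struct tx_state tx; };

int main(void)
{
    struct priv live = { { 1564, (void *)1 }, { 1, (void *)2 } };
    struct priv tmp  = { { 9032, (void *)3 }, { 1, (void *)4 } };  /* new MTU */

    /* Lines 2330-2334: stash the old rings, adopt the new ones; handing
     * the old set back to tmp (assumed here) lets the common free path
     * release it through the scratch device. */
    struct rx_state rx = live.rx;
    struct tx_state tx = live.tx;
    live.rx = tmp.rx;
    live.tx = tmp.tx;
    tmp.rx = rx;
    tmp.tx = tx;

    printf("live rx.buf_sz=%d\n", live.rx.buf_sz);
    return 0;
}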
2390 struct velocity_info *vptr = netdev_priv(dev); in velocity_mii_ioctl() local
2391 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_mii_ioctl()
2401 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) in velocity_mii_ioctl()
2405 spin_lock_irqsave(&vptr->lock, flags); in velocity_mii_ioctl()
2406 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); in velocity_mii_ioctl()
2407 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_mii_ioctl()
2408 check_connection_type(vptr->mac_regs); in velocity_mii_ioctl()
2429 struct velocity_info *vptr = netdev_priv(dev); in velocity_ioctl() local
2436 velocity_set_power_state(vptr, PCI_D0); in velocity_ioctl()
2449 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ioctl()
2467 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_stats() local
2473 spin_lock_irq(&vptr->lock); in velocity_get_stats()
2474 velocity_update_hw_mibs(vptr); in velocity_get_stats()
2475 spin_unlock_irq(&vptr->lock); in velocity_get_stats()
2477 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; in velocity_get_stats()
2478 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; in velocity_get_stats()
2479 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; in velocity_get_stats()
2482 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; in velocity_get_stats()
2486 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; in velocity_get_stats()
2506 struct velocity_info *vptr = netdev_priv(dev); in velocity_close() local
2508 napi_disable(&vptr->napi); in velocity_close()
2510 velocity_shutdown(vptr); in velocity_close()
2512 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) in velocity_close()
2513 velocity_get_ip(vptr); in velocity_close()
2517 velocity_free_rings(vptr); in velocity_close()
2519 vptr->flags &= (~VELOCITY_FLAGS_OPENED); in velocity_close()
2534 struct velocity_info *vptr = netdev_priv(dev); in velocity_xmit() local
2557 spin_lock_irqsave(&vptr->lock, flags); in velocity_xmit()
2559 index = vptr->tx.curr[qnum]; in velocity_xmit()
2560 td_ptr = &(vptr->tx.rings[qnum][index]); in velocity_xmit()
2561 tdinfo = &(vptr->tx.infos[qnum][index]); in velocity_xmit()
2571 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, in velocity_xmit()
2582 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, in velocity_xmit()
2614 prev = vptr->options.numtx - 1; in velocity_xmit()
2616 vptr->tx.used[qnum]++; in velocity_xmit()
2617 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; in velocity_xmit()
2619 if (AVAIL_TD(vptr, qnum) < 1) in velocity_xmit()
2622 td_ptr = &(vptr->tx.rings[qnum][prev]); in velocity_xmit()
2624 mac_tx_queue_wake(vptr->mac_regs, qnum); in velocity_xmit()
2626 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_xmit()
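velocity_xmit fills the descriptor at curr, then advances curr modulo numtx; prev, the slot whose ownership bit is flipped last so the NIC never sees a half-built chain, wraps to numtx - 1 when index is 0 (the branch at line 2614). A model of the index arithmetic:

#include <stdio.h>

#define NUM_TX 8   /* stand-in for vptr->options.numtx */

int main(void)
{
    int curr = 0, used = 3;

    int index = curr;               /* line 2559 */
    /* ...descriptor 'index' is filled and its buffers DMA-mapped... */

    int prev = index - 1;
    if (prev < 0)
        prev = NUM_TX - 1;          /* wrap, the branch at line 2614 */

    used++;                         /* line 2616 */
    curr = (index + 1) % NUM_TX;    /* line 2617 */

    /* Line 2622: only now is the descriptor at 'prev' handed over, so
     * the NIC never observes a half-built chain before the queue wake. */
    printf("index=%d prev=%d curr=%d used=%d\n", index, prev, curr, used);
    return 0;
}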
2656 static void velocity_init_info(struct velocity_info *vptr, in velocity_init_info() argument
2659 vptr->chip_id = info->chip_id; in velocity_init_info()
2660 vptr->tx.numq = info->txqueue; in velocity_init_info()
2661 vptr->multicast_limit = MCAM_SIZE; in velocity_init_info()
2662 spin_lock_init(&vptr->lock); in velocity_init_info()
2672 static int velocity_get_pci_info(struct velocity_info *vptr) in velocity_get_pci_info() argument
2674 struct pci_dev *pdev = vptr->pdev; in velocity_get_pci_info()
2678 vptr->ioaddr = pci_resource_start(pdev, 0); in velocity_get_pci_info()
2679 vptr->memaddr = pci_resource_start(pdev, 1); in velocity_get_pci_info()
2707 static int velocity_get_platform_info(struct velocity_info *vptr) in velocity_get_platform_info() argument
2712 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) in velocity_get_platform_info()
2713 vptr->no_eeprom = 1; in velocity_get_platform_info()
2715 ret = of_address_to_resource(vptr->dev->of_node, 0, &res); in velocity_get_platform_info()
2717 dev_err(vptr->dev, "unable to find memory address\n"); in velocity_get_platform_info()
2721 vptr->memaddr = res.start; in velocity_get_platform_info()
2724 dev_err(vptr->dev, "memory region is too small.\n"); in velocity_get_platform_info()
2738 static void velocity_print_info(struct velocity_info *vptr) in velocity_print_info() argument
2740 netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n", in velocity_print_info()
2741 get_chip_name(vptr->chip_id), vptr->netdev->dev_addr); in velocity_print_info()
2746 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_link() local
2747 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_link()
2767 struct velocity_info *vptr; in velocity_probe() local
2786 vptr = netdev_priv(netdev); in velocity_probe()
2793 vptr->netdev = netdev; in velocity_probe()
2794 vptr->dev = dev; in velocity_probe()
2796 velocity_init_info(vptr, info); in velocity_probe()
2799 vptr->pdev = to_pci_dev(dev); in velocity_probe()
2801 ret = velocity_get_pci_info(vptr); in velocity_probe()
2805 vptr->pdev = NULL; in velocity_probe()
2806 ret = velocity_get_platform_info(vptr); in velocity_probe()
2811 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); in velocity_probe()
2817 vptr->mac_regs = regs; in velocity_probe()
2818 vptr->rev_id = readb(&regs->rev_id); in velocity_probe()
2826 velocity_get_options(&vptr->options, velocity_nics); in velocity_probe()
2832 vptr->options.flags &= info->flags; in velocity_probe()
2838 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); in velocity_probe()
2840 vptr->wol_opts = vptr->options.wol_opts; in velocity_probe()
2841 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_probe()
2843 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); in velocity_probe()
2847 netif_napi_add(netdev, &vptr->napi, velocity_poll, in velocity_probe()
2866 vptr->mii_status |= VELOCITY_LINK_FAIL; in velocity_probe()
2869 velocity_print_info(vptr); in velocity_probe()
2870 dev_set_drvdata(vptr->dev, netdev); in velocity_probe()
2874 velocity_set_power_state(vptr, PCI_D3hot); in velocity_probe()
2880 netif_napi_del(&vptr->napi); in velocity_probe()
2898 struct velocity_info *vptr = netdev_priv(netdev); in velocity_remove() local
2901 netif_napi_del(&vptr->napi); in velocity_remove()
2902 iounmap(vptr->mac_regs); in velocity_remove()
3015 static int velocity_set_wol(struct velocity_info *vptr) in velocity_set_wol() argument
3017 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_set_wol()
3018 enum speed_opt spd_dpx = vptr->options.spd_dpx; in velocity_set_wol()
3036 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_set_wol()
3039 if (vptr->wol_opts & VELOCITY_WOL_ARP) { in velocity_set_wol()
3050 memcpy(arp->ar_tip, vptr->ip_addr, 4); in velocity_set_wol()
3070 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { in velocity_set_wol()
3071 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) in velocity_set_wol()
3072 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); in velocity_set_wol()
3074 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); in velocity_set_wol()
3077 if (vptr->mii_status & VELOCITY_SPEED_1000) in velocity_set_wol()
3078 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); in velocity_set_wol()
3110 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_save_context() argument
3112 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_save_context()
3130 struct velocity_info *vptr = netdev_priv(netdev); in velocity_suspend() local
3133 if (!netif_running(vptr->netdev)) in velocity_suspend()
3136 netif_device_detach(vptr->netdev); in velocity_suspend()
3138 spin_lock_irqsave(&vptr->lock, flags); in velocity_suspend()
3139 if (vptr->pdev) in velocity_suspend()
3140 pci_save_state(vptr->pdev); in velocity_suspend()
3142 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { in velocity_suspend()
3143 velocity_get_ip(vptr); in velocity_suspend()
3144 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3145 velocity_shutdown(vptr); in velocity_suspend()
3146 velocity_set_wol(vptr); in velocity_suspend()
3147 if (vptr->pdev) in velocity_suspend()
3148 pci_enable_wake(vptr->pdev, PCI_D3hot, 1); in velocity_suspend()
3149 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3151 velocity_save_context(vptr, &vptr->context); in velocity_suspend()
3152 velocity_shutdown(vptr); in velocity_suspend()
3153 if (vptr->pdev) in velocity_suspend()
3154 pci_disable_device(vptr->pdev); in velocity_suspend()
3155 velocity_set_power_state(vptr, PCI_D3hot); in velocity_suspend()
3158 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_suspend()
3170 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) in velocity_restore_context() argument
3172 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_restore_context()
3200 struct velocity_info *vptr = netdev_priv(netdev); in velocity_resume() local
3204 if (!netif_running(vptr->netdev)) in velocity_resume()
3207 velocity_set_power_state(vptr, PCI_D0); in velocity_resume()
3209 if (vptr->pdev) { in velocity_resume()
3210 pci_enable_wake(vptr->pdev, PCI_D0, 0); in velocity_resume()
3211 pci_restore_state(vptr->pdev); in velocity_resume()
3214 mac_wol_reset(vptr->mac_regs); in velocity_resume()
3216 spin_lock_irqsave(&vptr->lock, flags); in velocity_resume()
3217 velocity_restore_context(vptr, &vptr->context); in velocity_resume()
3218 velocity_init_registers(vptr, VELOCITY_INIT_WOL); in velocity_resume()
3219 mac_disable_int(vptr->mac_regs); in velocity_resume()
3221 velocity_tx_srv(vptr); in velocity_resume()
3223 for (i = 0; i < vptr->tx.numq; i++) { in velocity_resume()
3224 if (vptr->tx.used[i]) in velocity_resume()
3225 mac_tx_queue_wake(vptr->mac_regs, i); in velocity_resume()
3228 mac_enable_int(vptr->mac_regs); in velocity_resume()
3229 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_resume()
3230 netif_device_attach(vptr->netdev); in velocity_resume()
3272 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_up() local
3274 if (vptr->ethtool_ops_nesting == U32_MAX) in velocity_ethtool_up()
3276 if (!vptr->ethtool_ops_nesting++ && !netif_running(dev)) in velocity_ethtool_up()
3277 velocity_set_power_state(vptr, PCI_D0); in velocity_ethtool_up()
3291 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_down() local
3293 if (!--vptr->ethtool_ops_nesting && !netif_running(dev)) in velocity_ethtool_down()
3294 velocity_set_power_state(vptr, PCI_D3hot); in velocity_ethtool_down()
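The ethtool begin/complete hooks keep a nesting counter so the chip is powered up only by the first concurrent ethtool caller and powered down only by the last, and only when the interface is not already running. A model of that refcount (power_on/power_off stand in for velocity_set_power_state; the overflow return value is illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t nesting;   /* stand-in for vptr->ethtool_ops_nesting */
static bool running;       /* stand-in for netif_running(dev) */

static void power_on(void)  { puts("-> D0"); }
static void power_off(void) { puts("-> D3hot"); }

static int ethtool_up(void)            /* model of lines 3272-3277 */
{
    if (nesting == UINT32_MAX)
        return -1;                     /* refuse to overflow the counter */
    if (!nesting++ && !running)
        power_on();                    /* first caller powers the chip up */
    return 0;
}

static void ethtool_down(void)         /* model of lines 3291-3294 */
{
    if (!--nesting && !running)
        power_off();                   /* last caller powers it back down */
}

int main(void)
{
    ethtool_up();     /* prints "-> D0" */
    ethtool_up();     /* nested: no power transition */
    ethtool_down();
    ethtool_down();   /* prints "-> D3hot" */
    return 0;
}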
3300 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_link_ksettings() local
3301 struct mac_regs __iomem *regs = vptr->mac_regs; in velocity_get_link_ksettings()
3305 status = check_connection_type(vptr->mac_regs); in velocity_get_link_ksettings()
3317 if (vptr->options.spd_dpx == SPD_DPX_AUTO) { in velocity_get_link_ksettings()
3326 switch (vptr->options.spd_dpx) { in velocity_get_link_ksettings()
3375 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_link_ksettings() local
3381 curr_status = check_connection_type(vptr->mac_regs); in velocity_set_link_ksettings()
3411 vptr->options.spd_dpx = spd_dpx; in velocity_set_link_ksettings()
3413 velocity_set_media_mode(vptr, new_status); in velocity_set_link_ksettings()
3421 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_drvinfo() local
3425 if (vptr->pdev) in velocity_get_drvinfo()
3426 strlcpy(info->bus_info, pci_name(vptr->pdev), in velocity_get_drvinfo()
3434 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_get_wol() local
3441 if (vptr->wol_opts & VELOCITY_WOL_UCAST) in velocity_ethtool_get_wol()
3443 if (vptr->wol_opts & VELOCITY_WOL_ARP) in velocity_ethtool_get_wol()
3445 memcpy(&wol->sopass, vptr->wol_passwd, 6); in velocity_ethtool_get_wol()
3450 struct velocity_info *vptr = netdev_priv(dev); in velocity_ethtool_set_wol() local
3454 vptr->wol_opts = VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3464 vptr->wol_opts |= VELOCITY_WOL_MAGIC; in velocity_ethtool_set_wol()
3465 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3468 vptr->wol_opts |= VELOCITY_WOL_UCAST; in velocity_ethtool_set_wol()
3469 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3472 vptr->wol_opts |= VELOCITY_WOL_ARP; in velocity_ethtool_set_wol()
3473 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; in velocity_ethtool_set_wol()
3475 memcpy(vptr->wol_passwd, wol->sopass, 6); in velocity_ethtool_set_wol()
3525 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_coalesce() local
3527 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; in velocity_get_coalesce()
3528 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; in velocity_get_coalesce()
3530 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer); in velocity_get_coalesce()
3531 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); in velocity_get_coalesce()
3539 struct velocity_info *vptr = netdev_priv(dev); in velocity_set_coalesce() local
3554 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; in velocity_set_coalesce()
3555 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; in velocity_set_coalesce()
3557 set_pending_timer_val(&vptr->options.rxqueue_timer, in velocity_set_coalesce()
3559 set_pending_timer_val(&vptr->options.txqueue_timer, in velocity_set_coalesce()
3563 spin_lock_irqsave(&vptr->lock, flags); in velocity_set_coalesce()
3564 mac_disable_int(vptr->mac_regs); in velocity_set_coalesce()
3565 setup_adaptive_interrupts(vptr); in velocity_set_coalesce()
3566 setup_queue_timers(vptr); in velocity_set_coalesce()
3568 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); in velocity_set_coalesce()
3569 mac_clear_isr(vptr->mac_regs); in velocity_set_coalesce()
3570 mac_enable_int(vptr->mac_regs); in velocity_set_coalesce()
3571 spin_unlock_irqrestore(&vptr->lock, flags); in velocity_set_coalesce()
3634 struct velocity_info *vptr = netdev_priv(dev); in velocity_get_ethtool_stats() local
3635 u32 *p = vptr->mib_counter; in velocity_get_ethtool_stats()
3638 spin_lock_irq(&vptr->lock); in velocity_get_ethtool_stats()
3639 velocity_update_hw_mibs(vptr); in velocity_get_ethtool_stats()
3640 spin_unlock_irq(&vptr->lock); in velocity_get_ethtool_stats()