Lines matching refs: ep
177 #define rdb(ep, off) __raw_readb((ep)->base_addr + (off)) argument
178 #define rdw(ep, off) __raw_readw((ep)->base_addr + (off)) argument
179 #define rdl(ep, off) __raw_readl((ep)->base_addr + (off)) argument
180 #define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off)) argument
181 #define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) argument
182 #define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) argument
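
For context, a minimal usage sketch of the accessor macros above: they expand to raw MMIO accesses relative to the ioremap()ed register window stored in ep->base_addr (set up in ep93xx_eth_probe(), further down). The example_* helpers below are illustrative only and are not part of the driver; the REG_* names are the ones used elsewhere in this listing.

/* Illustrative helpers only -- not part of the driver. */
static u32 example_read_mii_status(struct ep93xx_priv *ep)
{
	/* rdl(ep, REG_MIISTS) expands to
	 *   __raw_readl(ep->base_addr + REG_MIISTS)  */
	return rdl(ep, REG_MIISTS);
}

static void example_enqueue_one_tx(struct ep93xx_priv *ep)
{
	/* wrl(ep, REG_TXDENQ, 1) expands to
	 *   __raw_writel(1, ep->base_addr + REG_TXDENQ)
	 * and, as used in ep93xx_xmit(), presumably tells the MAC that one
	 * more TX descriptor has been queued. */
	wrl(ep, REG_TXDENQ, 1);
}
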
186 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_mdio_read() local
190 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); in ep93xx_mdio_read()
193 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) in ep93xx_mdio_read()
202 data = rdl(ep, REG_MIIDATA); in ep93xx_mdio_read()
210 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_mdio_write() local
213 wrl(ep, REG_MIIDATA, data); in ep93xx_mdio_write()
214 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); in ep93xx_mdio_write()
217 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) in ep93xx_mdio_write()
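
The two MDIO helpers issue a command through REG_MIICMD and then busy-poll REG_MIISTS until the BUSY bit clears. A sketch of the read side, reconstructed from the referenced lines; the retry count, delay and timeout handling are assumptions. The write side is symmetrical, except that REG_MIIDATA is loaded before REG_MIICMD_WRITE is issued.

static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int data;
	int i;

	/* Start the read: PHY address in bits 9:5, register number in 4:0. */
	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

	/* Busy-poll the status register until the transaction finishes. */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_info("mdio read timed out\n");
		data = 0xffff;		/* conventional "no answer" value */
	} else {
		data = rdl(ep, REG_MIIDATA);
	}

	return data;
}
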
228 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_rx() local
239 entry = ep->rx_pointer; in ep93xx_rx()
240 rstat = ep->descs->rstat + entry; in ep93xx_rx()
282 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; in ep93xx_rx()
286 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); in ep93xx_rx()
293 napi_gro_receive(&ep->napi, skb); in ep93xx_rx()
302 ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); in ep93xx_rx()
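
ep93xx_rx() walks the RX status queue starting at ep->rx_pointer, copies each completed frame out of its fixed, preallocated DMA buffer into a freshly allocated skb, hands it to NAPI GRO, and advances the ring index with a power-of-two mask. Below is a condensed sketch of one iteration in a hypothetical helper (ep93xx_rx_one() does not exist in the driver); the status-word decoding and statistics are elided, and the DMA sync call and the 2-byte alignment reserve are assumptions that follow from the copy-from-fixed-buffer scheme.

/* Sketch of one RX ring iteration (status decoding and stats elided). */
static void ep93xx_rx_one(struct ep93xx_priv *ep, struct net_device *dev)
{
	int entry = ep->rx_pointer;
	struct ep93xx_rstat *rstat = ep->descs->rstat + entry;
	struct sk_buff *skb;
	int length = 0;

	/* ... check *rstat for "frame processed" and error bits, clear it,
	 *     and set 'length' from its frame-length field ... */

	skb = netdev_alloc_skb(dev, length + 2);
	if (likely(skb != NULL)) {
		struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];

		skb_reserve(skb, 2);	/* align the IP header */
		dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
					length, DMA_FROM_DEVICE);
		/* The RX buffers stay in place; copy into the new skb. */
		skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, dev);

		napi_gro_receive(&ep->napi, skb);
	}

	/* RX_QUEUE_ENTRIES is a power of two, so the mask wraps the index. */
	ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
}
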
311 struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); in ep93xx_poll() local
312 struct net_device *dev = ep->dev; in ep93xx_poll()
317 spin_lock_irq(&ep->rx_lock); in ep93xx_poll()
318 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); in ep93xx_poll()
319 spin_unlock_irq(&ep->rx_lock); in ep93xx_poll()
323 wrw(ep, REG_RXDENQ, rx); in ep93xx_poll()
324 wrw(ep, REG_RXSTSENQ, rx); in ep93xx_poll()
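
The poll routine does the RX work and, only once it has finished below budget, re-enables the RX interrupt under ep->rx_lock; it then returns the consumed descriptors and status slots to the hardware through the enqueue registers. A sketch, assuming ep93xx_rx(dev, budget) returns the number of frames processed and that completion goes through napi_complete_done():

static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		/* All pending work done: re-enable the RX interrupt.  The
		 * rx_lock orders this against the interrupt handler, which
		 * masks RX while scheduling NAPI. */
		spin_lock_irq(&ep->rx_lock);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		spin_unlock_irq(&ep->rx_lock);
	}

	if (rx) {
		/* Hand the consumed descriptors and status slots back. */
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}
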
332 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_xmit() local
342 entry = ep->tx_pointer; in ep93xx_xmit()
343 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); in ep93xx_xmit()
345 txd = &ep->descs->tdesc[entry]; in ep93xx_xmit()
350 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); in ep93xx_xmit()
355 spin_lock_irq(&ep->tx_pending_lock); in ep93xx_xmit()
356 ep->tx_pending++; in ep93xx_xmit()
357 if (ep->tx_pending == TX_QUEUE_ENTRIES) in ep93xx_xmit()
359 spin_unlock_irq(&ep->tx_pending_lock); in ep93xx_xmit()
361 wrl(ep, REG_TXDENQ, 1); in ep93xx_xmit()
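
ep93xx_xmit() claims the next TX descriptor at ep->tx_pointer, copies (and checksums) the frame into that entry's fixed buffer, accounts for it under ep->tx_pending_lock, stops the queue when the ring fills, and finally pokes REG_TXDENQ. A sketch with the descriptor-word setup and statistics elided; the DMA sync call and the return type are assumptions.

static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	/* ... drop over-length frames (elided) ... */

	/* Claim the next descriptor; TX_QUEUE_ENTRIES is a power of two. */
	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];
	/* ... fill txd->tdesc1 with end-of-frame flag, entry index and
	 *     frame length (the length sits in the low 12 bits) ... */

	/* Frames are copied, with checksum folding, into fixed TX buffers. */
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr,
				   skb->len, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);		/* ring full: stop the stack */
	spin_unlock_irq(&ep->tx_pending_lock);

	/* Tell the MAC one more TX descriptor has been enqueued. */
	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
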
368 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_tx_complete() local
373 spin_lock(&ep->tx_pending_lock); in ep93xx_tx_complete()
379 entry = ep->tx_clean_pointer; in ep93xx_tx_complete()
380 tstat = ep->descs->tstat + entry; in ep93xx_tx_complete()
394 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; in ep93xx_tx_complete()
408 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); in ep93xx_tx_complete()
409 if (ep->tx_pending == TX_QUEUE_ENTRIES) in ep93xx_tx_complete()
411 ep->tx_pending--; in ep93xx_tx_complete()
413 spin_unlock(&ep->tx_pending_lock); in ep93xx_tx_complete()
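
ep93xx_tx_complete(), called from the interrupt handler, reclaims finished entries starting at ep->tx_clean_pointer and wakes the queue again if the ring had been completely full. In the sketch below, tx_slot_completed() is a hypothetical stand-in for the driver's TX-status check, and the counter updates are elided.

static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (ep->tx_pending != 0) {
		int entry = ep->tx_clean_pointer;
		struct ep93xx_tstat *tstat = ep->descs->tstat + entry;

		/* Stop at the first slot the MAC has not written back yet;
		 * tx_slot_completed() stands in for that status check. */
		if (!tx_slot_completed(tstat))
			break;

		/* ... clear the status word and update the error and byte
		 *     counters; the frame length is tdesc1 & 0xfff ... */

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;	/* ring was full, queue was stopped */
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
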
422 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_irq() local
425 status = rdl(ep, REG_INTSTSC); in ep93xx_irq()
430 spin_lock(&ep->rx_lock); in ep93xx_irq()
431 if (likely(napi_schedule_prep(&ep->napi))) { in ep93xx_irq()
432 wrl(ep, REG_INTEN, REG_INTEN_TX); in ep93xx_irq()
433 __napi_schedule(&ep->napi); in ep93xx_irq()
435 spin_unlock(&ep->rx_lock); in ep93xx_irq()
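
The interrupt handler reads the interrupt status (acknowledging it, assuming REG_INTSTSC is clear-on-read), masks further RX interrupts while NAPI is being scheduled, and handles TX completions directly. A sketch; the REG_INTSTS_RX/REG_INTSTS_TX bit names are assumptions about the driver's register definitions.

static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;	/* shared line, not our interrupt */

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			/* Leave only TX enabled; the poll routine restores
			 * RX once it has drained the ring. */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}
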
444 static void ep93xx_free_buffers(struct ep93xx_priv *ep) in ep93xx_free_buffers() argument
446 struct device *dev = ep->dev->dev.parent; in ep93xx_free_buffers()
449 if (!ep->descs) in ep93xx_free_buffers()
455 d = ep->descs->rdesc[i].buf_addr; in ep93xx_free_buffers()
459 kfree(ep->rx_buf[i]); in ep93xx_free_buffers()
465 d = ep->descs->tdesc[i].buf_addr; in ep93xx_free_buffers()
469 kfree(ep->tx_buf[i]); in ep93xx_free_buffers()
472 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, in ep93xx_free_buffers()
473 ep->descs_dma_addr); in ep93xx_free_buffers()
474 ep->descs = NULL; in ep93xx_free_buffers()
477 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) in ep93xx_alloc_buffers() argument
479 struct device *dev = ep->dev->dev.parent; in ep93xx_alloc_buffers()
482 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), in ep93xx_alloc_buffers()
483 &ep->descs_dma_addr, GFP_KERNEL); in ep93xx_alloc_buffers()
484 if (ep->descs == NULL) in ep93xx_alloc_buffers()
501 ep->rx_buf[i] = buf; in ep93xx_alloc_buffers()
502 ep->descs->rdesc[i].buf_addr = d; in ep93xx_alloc_buffers()
503 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; in ep93xx_alloc_buffers()
520 ep->tx_buf[i] = buf; in ep93xx_alloc_buffers()
521 ep->descs->tdesc[i].buf_addr = d; in ep93xx_alloc_buffers()
527 ep93xx_free_buffers(ep); in ep93xx_alloc_buffers()
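
ep93xx_alloc_buffers() places the descriptor and status rings in a single coherent DMA block and backs every descriptor with a kmalloc()ed, streaming-mapped packet buffer; ep93xx_free_buffers() undoes this in reverse. A sketch of the RX half of the allocation (the TX loop follows the same pattern with DMA_TO_DEVICE); PKT_BUF_SIZE and the error path come from the surrounding driver, while the dma_mapping_error() check is an assumption.

static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	/* One coherent block holds all four rings (rdesc/rstat/tdesc/tstat). */
	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
				       &ep->descs_dma_addr, GFP_KERNEL);
	if (ep->descs == NULL)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		/* Streaming mapping, synced around each copy in ep93xx_rx(). */
		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->rx_buf[i] = buf;
		ep->descs->rdesc[i].buf_addr = d;
		/* Upper 16 bits: buffer index; lower bits: buffer length. */
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	}

	/* ... same pattern for the TX buffers, mapped DMA_TO_DEVICE ... */

	return 0;

err:
	ep93xx_free_buffers(ep);
	return 1;
}
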
533 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_start_hw() local
537 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); in ep93xx_start_hw()
539 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) in ep93xx_start_hw()
549 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9)); in ep93xx_start_hw()
552 if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0) in ep93xx_start_hw()
553 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8)); in ep93xx_start_hw()
556 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc); in ep93xx_start_hw()
557 wrl(ep, REG_RXDQBADD, addr); in ep93xx_start_hw()
558 wrl(ep, REG_RXDCURADD, addr); in ep93xx_start_hw()
559 wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc)); in ep93xx_start_hw()
562 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat); in ep93xx_start_hw()
563 wrl(ep, REG_RXSTSQBADD, addr); in ep93xx_start_hw()
564 wrl(ep, REG_RXSTSQCURADD, addr); in ep93xx_start_hw()
565 wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat)); in ep93xx_start_hw()
568 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc); in ep93xx_start_hw()
569 wrl(ep, REG_TXDQBADD, addr); in ep93xx_start_hw()
570 wrl(ep, REG_TXDQCURADD, addr); in ep93xx_start_hw()
571 wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc)); in ep93xx_start_hw()
574 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat); in ep93xx_start_hw()
575 wrl(ep, REG_TXSTSQBADD, addr); in ep93xx_start_hw()
576 wrl(ep, REG_TXSTSQCURADD, addr); in ep93xx_start_hw()
577 wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat)); in ep93xx_start_hw()
579 wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX); in ep93xx_start_hw()
580 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); in ep93xx_start_hw()
581 wrl(ep, REG_GIINTMSK, 0); in ep93xx_start_hw()
584 if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0) in ep93xx_start_hw()
594 wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES); in ep93xx_start_hw()
595 wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES); in ep93xx_start_hw()
597 wrb(ep, REG_INDAD0, dev->dev_addr[0]); in ep93xx_start_hw()
598 wrb(ep, REG_INDAD1, dev->dev_addr[1]); in ep93xx_start_hw()
599 wrb(ep, REG_INDAD2, dev->dev_addr[2]); in ep93xx_start_hw()
600 wrb(ep, REG_INDAD3, dev->dev_addr[3]); in ep93xx_start_hw()
601 wrb(ep, REG_INDAD4, dev->dev_addr[4]); in ep93xx_start_hw()
602 wrb(ep, REG_INDAD5, dev->dev_addr[5]); in ep93xx_start_hw()
603 wrl(ep, REG_AFP, 0); in ep93xx_start_hw()
605 wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE); in ep93xx_start_hw()
607 wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT); in ep93xx_start_hw()
608 wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE); in ep93xx_start_hw()
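
ep93xx_start_hw() programs each of the four hardware queues with the same base-address / current-address / byte-length triple, taking the DMA addresses from offsets into the single coherent descriptor block. The driver writes the registers out longhand, as the lines above show; the hypothetical helper below merely makes the repeated pattern explicit.

/* Hypothetical helper -- not in the driver -- capturing the queue setup
 * pattern used four times in ep93xx_start_hw(). */
static void ep93xx_setup_queue(struct ep93xx_priv *ep,
			       unsigned int base_reg, unsigned int curr_reg,
			       unsigned int len_reg,
			       size_t desc_offset, unsigned int len_bytes)
{
	u32 addr = ep->descs_dma_addr + desc_offset;

	wrl(ep, base_reg, addr);	/* queue base address             */
	wrl(ep, curr_reg, addr);	/* current pointer starts at base */
	wrw(ep, len_reg, len_bytes);	/* queue length in bytes          */
}

/* Equivalent to the RX descriptor queue setup shown above:
 *
 *	ep93xx_setup_queue(ep, REG_RXDQBADD, REG_RXDCURADD, REG_RXDQBLEN,
 *			   offsetof(struct ep93xx_descs, rdesc),
 *			   RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));
 */
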
615 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_stop_hw() local
618 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); in ep93xx_stop_hw()
620 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) in ep93xx_stop_hw()
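
Both ep93xx_start_hw() and ep93xx_stop_hw() begin with the same bounded busy-wait: write the RESET bit in REG_SELFCTL, then poll until the hardware clears it. A sketch of that wait in a hypothetical helper; the iteration count and delay are assumptions.

static int ep93xx_wait_for_reset(struct ep93xx_priv *ep)
{
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			return 0;	/* hardware finished its soft reset */
		msleep(1);
	}

	return -ETIMEDOUT;		/* caller reports "hw failed to reset" */
}
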
631 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_open() local
634 if (ep93xx_alloc_buffers(ep)) in ep93xx_open()
637 napi_enable(&ep->napi); in ep93xx_open()
640 napi_disable(&ep->napi); in ep93xx_open()
641 ep93xx_free_buffers(ep); in ep93xx_open()
645 spin_lock_init(&ep->rx_lock); in ep93xx_open()
646 ep->rx_pointer = 0; in ep93xx_open()
647 ep->tx_clean_pointer = 0; in ep93xx_open()
648 ep->tx_pointer = 0; in ep93xx_open()
649 spin_lock_init(&ep->tx_pending_lock); in ep93xx_open()
650 ep->tx_pending = 0; in ep93xx_open()
652 err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); in ep93xx_open()
654 napi_disable(&ep->napi); in ep93xx_open()
656 ep93xx_free_buffers(ep); in ep93xx_open()
660 wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); in ep93xx_open()
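
The open path, reconstructed from the references above: allocate the DMA buffers, enable NAPI, start the MAC, reset the ring indices and their locks, install the shared interrupt handler, and only then unmask the global interrupt and start the TX queue. The specific error-return values are assumptions.

static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	/* Interrupts can now reach us: unmask the global interrupt. */
	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}
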
669 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_close() local
671 napi_disable(&ep->napi); in ep93xx_close()
674 wrl(ep, REG_GIINTMSK, 0); in ep93xx_close()
675 free_irq(ep->irq, dev); in ep93xx_close()
677 ep93xx_free_buffers(ep); in ep93xx_close()
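
The close path mirrors open in reverse order; a short sketch:

static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	napi_disable(&ep->napi);
	netif_stop_queue(dev);

	wrl(ep, REG_GIINTMSK, 0);	/* mask the global interrupt first */
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
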
684 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_ioctl() local
687 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); in ep93xx_ioctl()
698 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_get_link_ksettings() local
700 mii_ethtool_get_link_ksettings(&ep->mii, cmd); in ep93xx_get_link_ksettings()
708 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_set_link_ksettings() local
709 return mii_ethtool_set_link_ksettings(&ep->mii, cmd); in ep93xx_set_link_ksettings()
714 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_nway_reset() local
715 return mii_nway_restart(&ep->mii); in ep93xx_nway_reset()
720 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_get_link() local
721 return mii_link_ok(&ep->mii); in ep93xx_get_link()
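
The ioctl and ethtool hooks are thin wrappers that delegate to the generic MII library through the mii_if_info embedded in ep93xx_priv (filled in by the probe code below). Two representative wrappers as a sketch; obtaining the mii_ioctl_data via if_mii() is an assumption about how the ioctl's `data` argument is produced.

static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}

static u32 ep93xx_get_link(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_link_ok(&ep->mii);
}
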
763 struct ep93xx_priv *ep; in ep93xx_eth_remove() local
770 ep = netdev_priv(dev); in ep93xx_eth_remove()
774 ep93xx_free_buffers(ep); in ep93xx_eth_remove()
776 if (ep->base_addr != NULL) in ep93xx_eth_remove()
777 iounmap(ep->base_addr); in ep93xx_eth_remove()
779 if (ep->res != NULL) { in ep93xx_eth_remove()
793 struct ep93xx_priv *ep; in ep93xx_eth_probe() local
812 ep = netdev_priv(dev); in ep93xx_eth_probe()
813 ep->dev = dev; in ep93xx_eth_probe()
815 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); in ep93xx_eth_probe()
819 ep->res = request_mem_region(mem->start, resource_size(mem), in ep93xx_eth_probe()
821 if (ep->res == NULL) { in ep93xx_eth_probe()
827 ep->base_addr = ioremap(mem->start, resource_size(mem)); in ep93xx_eth_probe()
828 if (ep->base_addr == NULL) { in ep93xx_eth_probe()
833 ep->irq = irq; in ep93xx_eth_probe()
835 ep->mii.phy_id = data->phy_id; in ep93xx_eth_probe()
836 ep->mii.phy_id_mask = 0x1f; in ep93xx_eth_probe()
837 ep->mii.reg_num_mask = 0x1f; in ep93xx_eth_probe()
838 ep->mii.dev = dev; in ep93xx_eth_probe()
839 ep->mii.mdio_read = ep93xx_mdio_read; in ep93xx_eth_probe()
840 ep->mii.mdio_write = ep93xx_mdio_write; in ep93xx_eth_probe()
841 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ in ep93xx_eth_probe()
853 dev->name, ep->irq, dev->dev_addr); in ep93xx_eth_probe()
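
ep93xx_eth_probe() maps the MMIO window, registers the NAPI context, and wires the MII library callbacks to the MDIO helpers at the top of this listing. A fragment reconstructed from the referenced lines; pdev, mem, irq and data are the probe's platform device, memory resource, interrupt number and platform data, the dev_name() region name is an assumption, and the error label is a placeholder.

	/* Inside ep93xx_eth_probe(), after the net_device has been allocated. */
	ep = netdev_priv(dev);
	ep->dev = dev;
	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);	/* NAPI weight 64 */

	ep->res = request_mem_region(mem->start, resource_size(mem),
				     dev_name(&pdev->dev));
	if (ep->res == NULL)
		goto err_out;				/* placeholder label */

	ep->base_addr = ioremap(mem->start, resource_size(mem));
	if (ep->base_addr == NULL)
		goto err_out;
	ep->irq = irq;

	/* Hand PHY access to the generic MII library via the helpers above. */
	ep->mii.phy_id = data->phy_id;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;
	ep->mii.dev = dev;
	ep->mii.mdio_read = ep93xx_mdio_read;
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40;	/* max HCLK 100 MHz, min MDIO clock 2.5 MHz */
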