Lines Matching refs:sge

242 struct sge *sge; member
256 struct sge { struct
284 static void tx_sched_stop(struct sge *sge) in tx_sched_stop() argument
286 struct sched *s = sge->tx_sched; in tx_sched_stop()
299 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, in t1_sched_update_parms() argument
302 struct sched *s = sge->tx_sched; in t1_sched_update_parms()
322 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { in t1_sched_update_parms()
345 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
347 struct sched *s = sge->tx_sched;
352 t1_sched_update_parms(sge, i, 0, 0);
359 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
362 struct sched *s = sge->tx_sched;
365 t1_sched_update_parms(sge, port, 0, 0);
373 static int tx_sched_init(struct sge *sge) in tx_sched_init() argument
384 s->sge = sge; in tx_sched_init()
385 sge->tx_sched = s; in tx_sched_init()
389 t1_sched_update_parms(sge, i, 1500, 1000); in tx_sched_init()
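A hedged sketch (not part of the listing) of how a caller might re-tune the per-port Tx scheduler entry points above. Judging from the tx_sched_init() call site t1_sched_update_parms(sge, i, 1500, 1000), the last two arguments are taken to be MTU and link speed in Mbps; treat that as an assumption rather than something this listing states.

static void example_retune_tx_sched(struct sge *sge, unsigned int port,
				    unsigned int mtu, unsigned int speed_mbps)
{
	/* Assumed argument meaning: (sge, port, mtu, speed); cf. line 389. */
	t1_sched_update_parms(sge, port, mtu, speed_mbps);
}
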
400 static inline int sched_update_avail(struct sge *sge) in sched_update_avail() argument
402 struct sched *s = sge->tx_sched; in sched_update_avail()
434 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, in sched_skb() argument
437 struct sched *s = sge->tx_sched; in sched_skb()
475 if (update-- && sched_update_avail(sge)) in sched_skb()
483 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb()
487 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sched_skb()
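Hedged sketch of the doorbell pattern that closes sched_skb() at line 487: once descriptors have been posted to command queue 0, the hardware is kicked by writing F_CMDQ0_ENABLE to A_SG_DOORBELL. The wmb() barrier is added here for illustration and is not copied from the listing.

static inline void example_kick_cmdq0(struct adapter *adapter)
{
	wmb();	/* assumed: posted descriptors must be visible before the kick */
	writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
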
527 static void free_rx_resources(struct sge *sge) in free_rx_resources() argument
529 struct pci_dev *pdev = sge->adapter->pdev; in free_rx_resources()
532 if (sge->respQ.entries) { in free_rx_resources()
533 size = sizeof(struct respQ_e) * sge->respQ.size; in free_rx_resources()
534 dma_free_coherent(&pdev->dev, size, sge->respQ.entries, in free_rx_resources()
535 sge->respQ.dma_addr); in free_rx_resources()
539 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources()
557 static int alloc_rx_resources(struct sge *sge, struct sge_params *p) in alloc_rx_resources() argument
559 struct pci_dev *pdev = sge->adapter->pdev; in alloc_rx_resources()
563 struct freelQ *q = &sge->freelQ[i]; in alloc_rx_resources()
567 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; in alloc_rx_resources()
587 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + in alloc_rx_resources()
589 sge->freelQ[!sge->jumbo_fl].dma_offset; in alloc_rx_resources()
593 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; in alloc_rx_resources()
599 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; in alloc_rx_resources()
600 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; in alloc_rx_resources()
602 sge->respQ.genbit = 1; in alloc_rx_resources()
603 sge->respQ.size = SGE_RESPQ_E_N; in alloc_rx_resources()
604 sge->respQ.credits = 0; in alloc_rx_resources()
605 size = sizeof(struct respQ_e) * sge->respQ.size; in alloc_rx_resources()
606 sge->respQ.entries = in alloc_rx_resources()
607 dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr, in alloc_rx_resources()
609 if (!sge->respQ.entries) in alloc_rx_resources()
614 free_rx_resources(sge); in alloc_rx_resources()
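alloc_rx_resources() and free_rx_resources() above pair dma_alloc_coherent() with dma_free_coherent() for the response-queue ring (lines 606-607 and 534-535). A self-contained hedged sketch of that pattern follows; example_entry and the helper names are illustrative, not driver types. Needs <linux/pci.h> and <linux/dma-mapping.h>.

struct example_entry {
	u32 word[4];			/* placeholder descriptor layout */
};

static int example_alloc_ring(struct pci_dev *pdev, unsigned int nelem,
			      struct example_entry **ring, dma_addr_t *phys)
{
	size_t size = sizeof(struct example_entry) * nelem;

	*ring = dma_alloc_coherent(&pdev->dev, size, phys, GFP_KERNEL);
	return *ring ? 0 : -ENOMEM;
}

static void example_free_ring(struct pci_dev *pdev, unsigned int nelem,
			      struct example_entry *ring, dma_addr_t phys)
{
	dma_free_coherent(&pdev->dev, sizeof(struct example_entry) * nelem,
			  ring, phys);
}
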
621 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) in free_cmdQ_buffers() argument
624 struct pci_dev *pdev = sge->adapter->pdev; in free_cmdQ_buffers()
656 static void free_tx_resources(struct sge *sge) in free_tx_resources() argument
658 struct pci_dev *pdev = sge->adapter->pdev; in free_tx_resources()
662 struct cmdQ *q = &sge->cmdQ[i]; in free_tx_resources()
666 free_cmdQ_buffers(sge, q, q->in_use); in free_tx_resources()
680 static int alloc_tx_resources(struct sge *sge, struct sge_params *p) in alloc_tx_resources() argument
682 struct pci_dev *pdev = sge->adapter->pdev; in alloc_tx_resources()
686 struct cmdQ *q = &sge->cmdQ[i]; in alloc_tx_resources()
715 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * in alloc_tx_resources()
720 free_tx_resources(sge); in alloc_tx_resources()
738 struct sge *sge = adapter->sge; in t1_vlan_mode() local
741 sge->sge_control |= F_VLAN_XTRACT; in t1_vlan_mode()
743 sge->sge_control &= ~F_VLAN_XTRACT; in t1_vlan_mode()
745 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); in t1_vlan_mode()
754 static void configure_sge(struct sge *sge, struct sge_params *p) in configure_sge() argument
756 struct adapter *ap = sge->adapter; in configure_sge()
759 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, in configure_sge()
761 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, in configure_sge()
763 setup_ring_params(ap, sge->freelQ[0].dma_addr, in configure_sge()
764 sge->freelQ[0].size, A_SG_FL0BASELWR, in configure_sge()
766 setup_ring_params(ap, sge->freelQ[1].dma_addr, in configure_sge()
767 sge->freelQ[1].size, A_SG_FL1BASELWR, in configure_sge()
773 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, in configure_sge()
775 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); in configure_sge()
777 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | in configure_sge()
780 V_RX_PKT_OFFSET(sge->rx_pkt_pad); in configure_sge()
783 sge->sge_control |= F_ENABLE_BIG_ENDIAN; in configure_sge()
787 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); in configure_sge()
789 t1_sge_set_coalesce_params(sge, p); in configure_sge()
795 static inline unsigned int jumbo_payload_capacity(const struct sge *sge) in jumbo_payload_capacity() argument
797 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - in jumbo_payload_capacity()
798 sge->freelQ[sge->jumbo_fl].dma_offset - in jumbo_payload_capacity()
805 void t1_sge_destroy(struct sge *sge) in t1_sge_destroy() argument
809 for_each_port(sge->adapter, i) in t1_sge_destroy()
810 free_percpu(sge->port_stats[i]); in t1_sge_destroy()
812 kfree(sge->tx_sched); in t1_sge_destroy()
813 free_tx_resources(sge); in t1_sge_destroy()
814 free_rx_resources(sge); in t1_sge_destroy()
815 kfree(sge); in t1_sge_destroy()
830 static void refill_free_list(struct sge *sge, struct freelQ *q) in refill_free_list() argument
832 struct pci_dev *pdev = sge->adapter->pdev; in refill_free_list()
848 skb_reserve(skb, sge->rx_pkt_pad); in refill_free_list()
876 static void freelQs_empty(struct sge *sge) in freelQs_empty() argument
878 struct adapter *adapter = sge->adapter; in freelQs_empty()
882 refill_free_list(sge, &sge->freelQ[0]); in freelQs_empty()
883 refill_free_list(sge, &sge->freelQ[1]); in freelQs_empty()
885 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && in freelQs_empty()
886 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { in freelQs_empty()
888 irqholdoff_reg = sge->fixed_intrtimer; in freelQs_empty()
892 irqholdoff_reg = sge->intrtimer_nres; in freelQs_empty()
909 void t1_sge_intr_disable(struct sge *sge) in t1_sge_intr_disable() argument
911 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
913 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_disable()
914 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_disable()
920 void t1_sge_intr_enable(struct sge *sge) in t1_sge_intr_enable() argument
923 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
925 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) in t1_sge_intr_enable()
927 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); in t1_sge_intr_enable()
928 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); in t1_sge_intr_enable()
934 void t1_sge_intr_clear(struct sge *sge) in t1_sge_intr_clear() argument
936 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); in t1_sge_intr_clear()
937 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); in t1_sge_intr_clear()
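Hedged sketch of a typical ordering for the interrupt helpers above. The listing does not show their callers, so the clear-before-enable sequence is an assumption about conventional bring-up, not quoted from the driver.

static void example_sge_irq_up(struct sge *sge)
{
	t1_sge_intr_clear(sge);		/* assumed: drop stale cause bits first */
	t1_sge_intr_enable(sge);	/* then unmask the SGE sources */
}

static void example_sge_irq_down(struct sge *sge)
{
	t1_sge_intr_disable(sge);	/* mask before tearing down resources */
}
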
943 int t1_sge_intr_error_handler(struct sge *sge) in t1_sge_intr_error_handler() argument
945 struct adapter *adapter = sge->adapter; in t1_sge_intr_error_handler()
951 sge->stats.respQ_empty++; in t1_sge_intr_error_handler()
953 sge->stats.respQ_overflow++; in t1_sge_intr_error_handler()
958 sge->stats.freelistQ_empty++; in t1_sge_intr_error_handler()
959 freelQs_empty(sge); in t1_sge_intr_error_handler()
962 sge->stats.pkt_too_big++; in t1_sge_intr_error_handler()
967 sge->stats.pkt_mismatch++; in t1_sge_intr_error_handler()
977 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) in t1_sge_get_intr_counts() argument
979 return &sge->stats; in t1_sge_get_intr_counts()
982 void t1_sge_get_port_stats(const struct sge *sge, int port, in t1_sge_get_port_stats() argument
989 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
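The statistics path above uses per-CPU counters: alloc_percpu() at line 2080, this_cpu_ptr() on the hot paths (lines 1381 and 1782), and per_cpu_ptr() summation in t1_sge_get_port_stats() (line 989). A hedged, self-contained sketch of that pattern follows; example_stats and its rx_packets field are illustrative and are not claimed to be members of the driver's struct sge_port_stats. Needs <linux/percpu.h> and <linux/cpumask.h>.

struct example_stats {
	u64 rx_packets;			/* illustrative counter */
};

static void example_count_rx(struct example_stats __percpu *stats)
{
	this_cpu_ptr(stats)->rx_packets++;	/* lock-free per-CPU update */
}

static u64 example_sum_rx_packets(struct example_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->rx_packets;
	return total;
}
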
1295 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) in reclaim_completed_tx() argument
1302 free_cmdQ_buffers(sge, q, reclaim); in reclaim_completed_tx()
1314 struct sge *sge = s->sge; in restart_sched() local
1315 struct adapter *adapter = sge->adapter; in restart_sched()
1316 struct cmdQ *q = &sge->cmdQ[0]; in restart_sched()
1321 reclaim_completed_tx(sge, q); in restart_sched()
1325 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { in restart_sched()
1360 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) in sge_rx() argument
1364 struct adapter *adapter = sge->adapter; in sge_rx()
1368 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); in sge_rx()
1370 sge->stats.rx_drops++; in sge_rx()
1381 st = this_cpu_ptr(sge->port_stats[p->iff]); in sge_rx()
1415 static void restart_tx_queues(struct sge *sge) in restart_tx_queues() argument
1417 struct adapter *adap = sge->adapter; in restart_tx_queues()
1420 if (!enough_free_Tx_descs(&sge->cmdQ[0])) in restart_tx_queues()
1426 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && in restart_tx_queues()
1428 sge->stats.cmdQ_restarted[2]++; in restart_tx_queues()
1442 struct sge *sge = adapter->sge; in update_tx_info() local
1443 struct cmdQ *cmdq = &sge->cmdQ[0]; in update_tx_info()
1447 freelQs_empty(sge); in update_tx_info()
1458 if (sge->tx_sched) in update_tx_info()
1459 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); in update_tx_info()
1464 if (unlikely(sge->stopped_tx_queues != 0)) in update_tx_info()
1465 restart_tx_queues(sge); in update_tx_info()
1476 struct sge *sge = adapter->sge; in process_responses() local
1477 struct respQ *q = &sge->respQ; in process_responses()
1499 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1504 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_responses()
1510 sge_rx(sge, fl, e->BufferLength); in process_responses()
1524 refill_free_list(sge, fl); in process_responses()
1526 sge->stats.pure_rsps++; in process_responses()
1543 sge->cmdQ[1].processed += cmdq_processed[1]; in process_responses()
1550 const struct respQ *Q = &adapter->sge->respQ; in responses_pending()
1566 struct sge *sge = adapter->sge; in process_pure_responses() local
1567 struct respQ *q = &sge->respQ; in process_pure_responses()
1569 const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; in process_pure_responses()
1595 sge->stats.pure_rsps++; in process_pure_responses()
1599 sge->cmdQ[1].processed += cmdq_processed[1]; in process_pure_responses()
1616 writel(adapter->sge->respQ.cidx, in t1_poll()
1625 struct sge *sge = adapter->sge; in t1_interrupt() local
1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); in t1_interrupt()
1649 sge->stats.unhandled_irqs++; in t1_interrupt()
1670 struct sge *sge = adapter->sge; in t1_sge_tx() local
1671 struct cmdQ *q = &sge->cmdQ[qid]; in t1_sge_tx()
1676 reclaim_completed_tx(sge, q); in t1_sge_tx()
1687 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1688 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1698 set_bit(dev->if_port, &sge->stopped_tx_queues); in t1_sge_tx()
1699 sge->stats.cmdQ_full[2]++; in t1_sge_tx()
1705 if (sge->tx_sched && !qid && skb->dev) { in t1_sge_tx()
1711 skb = sched_skb(sge, skb, credits); in t1_sge_tx()
1781 struct sge *sge = adapter->sge; in t1_start_xmit() local
1782 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); in t1_start_xmit()
1847 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { in t1_start_xmit()
1850 adapter->sge->espibug_skb[dev->if_port] = skb; in t1_start_xmit()
1895 struct sge *sge = from_timer(sge, t, tx_reclaim_timer); in sge_tx_reclaim_cb() local
1898 struct cmdQ *q = &sge->cmdQ[i]; in sge_tx_reclaim_cb()
1903 reclaim_completed_tx(sge, q); in sge_tx_reclaim_cb()
1905 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); in sge_tx_reclaim_cb()
1909 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in sge_tx_reclaim_cb()
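sge_tx_reclaim_cb() above shows the timer_setup()/from_timer()/mod_timer() pattern: the timer is set up at line 2085, the containing struct sge is recovered at line 1895, the timer re-arms itself at line 1909, and it is cancelled with del_timer_sync() at line 1959. A hedged sketch of the same shape, with the actual reclaim work elided.

static void example_reclaim_cb(struct timer_list *t)
{
	struct sge *sge = from_timer(sge, t, tx_reclaim_timer);

	/* ... reclaim completed Tx descriptors here ... */

	/* Re-arm so the reclaim runs periodically. */
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/* At init time:  timer_setup(&sge->tx_reclaim_timer, example_reclaim_cb, 0);
 * At stop time:  del_timer_sync(&sge->tx_reclaim_timer);                     */
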
1915 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) in t1_sge_set_coalesce_params() argument
1917 sge->fixed_intrtimer = p->rx_coalesce_usecs * in t1_sge_set_coalesce_params()
1918 core_ticks_per_usec(sge->adapter); in t1_sge_set_coalesce_params()
1919 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); in t1_sge_set_coalesce_params()
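Hedged sketch of feeding a new rx_coalesce_usecs value into t1_sge_set_coalesce_params() above, for example from an ethtool handler. Only adapter->sge is visible in this listing; storing struct sge_params at adapter->params.sge is an assumption.

static int example_set_rx_coalesce(struct adapter *adapter, unsigned int usecs)
{
	struct sge_params *p = &adapter->params.sge;	/* assumed member name */

	p->rx_coalesce_usecs = usecs;
	return t1_sge_set_coalesce_params(adapter->sge, p);
}
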
1927 int t1_sge_configure(struct sge *sge, struct sge_params *p) in t1_sge_configure() argument
1929 if (alloc_rx_resources(sge, p)) in t1_sge_configure()
1931 if (alloc_tx_resources(sge, p)) { in t1_sge_configure()
1932 free_rx_resources(sge); in t1_sge_configure()
1935 configure_sge(sge, p); in t1_sge_configure()
1943 p->large_buf_capacity = jumbo_payload_capacity(sge); in t1_sge_configure()
1950 void t1_sge_stop(struct sge *sge) in t1_sge_stop() argument
1953 writel(0, sge->adapter->regs + A_SG_CONTROL); in t1_sge_stop()
1954 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_stop()
1956 if (is_T2(sge->adapter)) in t1_sge_stop()
1957 del_timer_sync(&sge->espibug_timer); in t1_sge_stop()
1959 del_timer_sync(&sge->tx_reclaim_timer); in t1_sge_stop()
1960 if (sge->tx_sched) in t1_sge_stop()
1961 tx_sched_stop(sge); in t1_sge_stop()
1964 kfree_skb(sge->espibug_skb[i]); in t1_sge_stop()
1970 void t1_sge_start(struct sge *sge) in t1_sge_start() argument
1972 refill_free_list(sge, &sge->freelQ[0]); in t1_sge_start()
1973 refill_free_list(sge, &sge->freelQ[1]); in t1_sge_start()
1975 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); in t1_sge_start()
1976 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); in t1_sge_start()
1977 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ in t1_sge_start()
1979 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); in t1_sge_start()
1981 if (is_T2(sge->adapter)) in t1_sge_start()
1982 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in t1_sge_start()
1990 struct sge *sge = from_timer(sge, t, espibug_timer); in espibug_workaround_t204() local
1991 struct adapter *adapter = sge->adapter; in espibug_workaround_t204()
2002 struct sk_buff *skb = sge->espibug_skb[i]; in espibug_workaround_t204()
2028 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround_t204()
2033 struct sge *sge = from_timer(sge, t, espibug_timer); in espibug_workaround() local
2034 struct adapter *adapter = sge->adapter; in espibug_workaround()
2037 struct sk_buff *skb = sge->espibug_skb[0]; in espibug_workaround()
2060 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); in espibug_workaround()
2066 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) in t1_sge_create()
2068 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); in t1_sge_create() local
2071 if (!sge) in t1_sge_create()
2074 sge->adapter = adapter; in t1_sge_create()
2075 sge->netdev = adapter->port[0].dev; in t1_sge_create()
2076 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; in t1_sge_create()
2077 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; in t1_sge_create()
2080 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); in t1_sge_create()
2081 if (!sge->port_stats[i]) in t1_sge_create()
2085 timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0); in t1_sge_create()
2087 if (is_T2(sge->adapter)) { in t1_sge_create()
2088 timer_setup(&sge->espibug_timer, in t1_sge_create()
2093 tx_sched_init(sge); in t1_sge_create()
2095 sge->espibug_timeout = 1; in t1_sge_create()
2098 sge->espibug_timeout = HZ/100; in t1_sge_create()
2104 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; in t1_sge_create()
2105 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; in t1_sge_create()
2106 if (sge->tx_sched) { in t1_sge_create()
2107 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) in t1_sge_create()
2117 return sge; in t1_sge_create()
2120 free_percpu(sge->port_stats[i]); in t1_sge_create()
2123 kfree(sge); in t1_sge_create()
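
Taken together, the entry points in this listing imply a create -> configure -> start bring-up and a stop -> destroy teardown. A hedged sketch of that lifecycle follows; error unwinding is simplified, the real call sites live outside this listing, and calling t1_sge_destroy() on a partially set-up sge is assumed to be safe, as in the driver's own remove path.

static int example_sge_up(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge;
	int ret;

	sge = t1_sge_create(adapter, p);	/* struct sge, timers, Tx scheduler */
	if (!sge)
		return -ENOMEM;

	ret = t1_sge_configure(sge, p);		/* Rx rings, then Tx rings */
	if (ret) {
		t1_sge_destroy(sge);		/* assumed safe here; see lead-in */
		return ret;
	}

	adapter->sge = sge;
	t1_sge_start(sge);			/* fill free lists, enable the SGE */
	return 0;
}

static void example_sge_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);		/* quiesce hardware, stop timers */
	t1_sge_destroy(adapter->sge);		/* free rings, stats and struct sge */
	adapter->sge = NULL;
}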