Lines Matching refs:tq (references to the TX queue pointer, struct vmxnet3_tx_queue *tq, in drivers/net/vmxnet3/vmxnet3.c)

103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  in vmxnet3_tq_stopped()  argument
105 return tq->stopped; in vmxnet3_tq_stopped()
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
112 tq->stopped = false; in vmxnet3_tq_start()
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
120 tq->stopped = false; in vmxnet3_tq_wake()
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
128 tq->stopped = true; in vmxnet3_tq_stop()
129 tq->num_stop++; in vmxnet3_tq_stop()
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
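
The four queue-state helpers above are small enough that the matched lines give essentially their whole bodies. A minimal reconstruction is sketched below; the struct definitions come from drivers/net/vmxnet3/vmxnet3_int.h, and the static/return-type qualifiers are assumed since they are not part of the matched lines. Note that the subqueue index handed to the netif_*_subqueue() calls is simply the queue's offset within the adapter's tx_queue[] array, and that in vmxnet3_tq_stopped() the adapter parameter is unused and kept only for a uniform signature.

	static bool
	vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
	{
		return tq->stopped;
	}

	static void
	vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
	{
		tq->stopped = false;
		/* subqueue index = position of tq in adapter->tx_queue[] */
		netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
	}

	static void
	vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
	{
		tq->stopped = false;
		netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
	}

	static void
	vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
	{
		tq->stopped = true;
		tq->num_stop++;		/* bookkeeping: how often this queue was stopped */
		netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
	}
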
329 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, in vmxnet3_unmap_pkt() argument
336 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
337 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
339 skb = tq->buf_info[eop_idx].skb; in vmxnet3_unmap_pkt()
341 tq->buf_info[eop_idx].skb = NULL; in vmxnet3_unmap_pkt()
343 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
345 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
346 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
354 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
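
Read together, lines 329-354 give the shape of the completion-side unmap path: vmxnet3_unmap_pkt() asserts that the packet being reclaimed starts at tx_ring.next2comp, takes and clears the skb stored at the end-of-packet index, then walks the command ring from next2comp up to one slot past EOP, unmapping each buffer. A rough sketch of that walk (vmxnet3_unmap_tx_buf() and vmxnet3_cmd_ring_adv_next2comp() are taken on faith to undo one DMA mapping and to advance next2comp with wrap-around, respectively):

	skb = tq->buf_info[eop_idx].skb;
	tq->buf_info[eop_idx].skb = NULL;

	/* advance eop_idx to the slot after the EOP descriptor, wrapping at ring size */
	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}
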
364 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_tx_complete() argument
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
378 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
386 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && in vmxnet3_tq_tx_complete()
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && in vmxnet3_tq_tx_complete()
391 vmxnet3_tq_wake(tq, adapter); in vmxnet3_tq_tx_complete()
393 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
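
The completion loop in vmxnet3_tq_tx_complete() is driven by the generation bit: a completion descriptor belongs to the driver only while its gen field equals tq->comp_ring.gen, so the loop stops at the first descriptor still carrying the previous generation. Once descriptors have been reclaimed, the queue is re-woken under tq->tx_lock if enough command-ring entries are free again. A condensed sketch; the pieces not visible in the matched lines (the barrier after the gen check and the carrier check in the wake condition) are assumptions based on the usual pattern:

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		dma_rmb();	/* assumed: read tcd fields only after gen is seen */

		/* the completion descriptor names the EOP index in the tx ring */
		vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(&gdesc->tcd), tq,
				  adapter->pdev, adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	spin_lock(&tq->tx_lock);
	if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
		     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
		     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
		     netif_carrier_ok(adapter->netdev)))	/* carrier check assumed */
		vmxnet3_tq_wake(tq, adapter);
	spin_unlock(&tq->tx_lock);
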
400 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_cleanup() argument
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
408 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
419 for (i = 0; i < tq->tx_ring.size; i++) { in vmxnet3_tq_cleanup()
420 BUG_ON(tq->buf_info[i].skb != NULL || in vmxnet3_tq_cleanup()
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
424 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
427 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
428 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
433 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_destroy() argument
436 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
437 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
439 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
440 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
442 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
444 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
445 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
446 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
448 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
449 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
451 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
452 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
454 if (tq->buf_info) { in vmxnet3_tq_destroy()
456 tq->tx_ring.size * sizeof(tq->buf_info[0]), in vmxnet3_tq_destroy()
457 tq->buf_info, tq->buf_info_pa); in vmxnet3_tq_destroy()
458 tq->buf_info = NULL; in vmxnet3_tq_destroy()
475 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_init() argument
481 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
483 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
484 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
486 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
487 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
490 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
492 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
493 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
496 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
497 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
498 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
505 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_create() argument
510 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
511 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
513 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
514 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
515 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
516 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
521 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
522 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
523 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
524 if (!tq->data_ring.base) { in vmxnet3_tq_create()
529 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
530 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
531 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
532 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); in vmxnet3_tq_create()
538 tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_tq_create()
539 &tq->buf_info_pa, GFP_KERNEL); in vmxnet3_tq_create()
540 if (!tq->buf_info) in vmxnet3_tq_create()
546 vmxnet3_tq_destroy(tq, adapter); in vmxnet3_tq_create()
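
vmxnet3_tq_create() and vmxnet3_tq_destroy() mirror each other: four DMA-coherent regions (tx ring, tx data ring, completion ring, and the buf_info array) are obtained with dma_alloc_coherent() and released with dma_free_coherent(), and any allocation failure simply falls through to vmxnet3_tq_destroy(), which is safe on a partially built queue because every pointer is tested and reset to NULL (lines 436-458). A compressed sketch of the allocate-or-unwind pattern, with the error label and return values assumed:

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base)
		goto err;

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base)
		goto err;

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base)
		goto err;

	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(tq->buf_info[0]),
			&tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);	/* frees whatever did get allocated */
	return -ENOMEM;
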
677 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, in vmxnet3_map_pkt() argument
689 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
691 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
696 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
697 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
698 tq->txdata_desc_size); in vmxnet3_map_pkt()
702 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
707 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
710 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
713 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
730 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
740 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
741 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
749 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
751 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
752 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
765 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
782 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
783 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
791 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
793 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
794 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
805 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
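
One detail worth noting in vmxnet3_map_pkt(): dw2, which carries the descriptor generation bit, is seeded with the inverted generation for the start-of-packet descriptor ((tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT, line 689), while every subsequent descriptor of the packet uses the live generation (lines 713, 752, 794). Assuming the usual vmxnet3 convention, the device therefore ignores the whole chain until the SOP descriptor's gen bit is flipped to the current generation; that flip happens later, in vmxnet3_tq_xmit(), outside the lines matched here. In outline:

	/* SOP descriptor: deliberately wrong generation, device skips it for now */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	/* ... fill the SOP descriptor, advance next2fill ... */

	/* every following descriptor carries the live generation */
	dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	/* ... map one fragment per descriptor, advance next2fill each time ... */

	/* the caller flips the SOP gen bit once the packet is complete (assumed) */
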
840 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_parse_hdr() argument
909 tq->txdata_desc_size, in vmxnet3_parse_hdr()
921 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
922 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
943 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_copy_hdr() argument
949 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
950 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
951 tq->txdata_desc_size); in vmxnet3_copy_hdr()
956 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
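
vmxnet3_parse_hdr() works out how many header bytes must be copied (ctx->copy_size) and rejects the packet, bumping tq->stats.oversized_hdr, when that exceeds the per-slot size of the tx data ring (tq->txdata_desc_size, line 921). vmxnet3_copy_hdr() then copies the header into the data-ring slot paired with the descriptor about to be filled; the address arithmetic visible in lines 949-951 is just base + next2fill * slot size:

	/* slot i of the tx data ring lives at base + i * txdata_desc_size */
	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
					    tq->tx_ring.next2fill *
					    tq->txdata_desc_size);
	memcpy(tdd->data, skb->data, ctx->copy_size);	/* copy itself assumed, not in the matches */
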
1023 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_tq_xmit() argument
1048 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1051 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1065 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1068 tq->stats.linearized++; in vmxnet3_tq_xmit()
1075 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1082 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1090 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1096 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1100 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1102 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1103 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1107 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1109 vmxnet3_tq_stop(tq, adapter); in vmxnet3_tq_xmit()
1110 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1115 vmxnet3_copy_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1118 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1132 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1167 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1194 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1197 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1199 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1200 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1202 VMXNET3_REG_TXPROD + tq->qid * 8, in vmxnet3_tq_xmit()
1203 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1209 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1211 tq->stats.drop_total++; in vmxnet3_tq_xmit()
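
Taken together, the vmxnet3_tq_xmit() matches trace the transmit fast path: parse and validate the headers, check ring space under tq->tx_lock and stop the subqueue when the ring is full, copy the header into the data ring, map the payload, account the packet in tq->shared->txNumDeferred, and only ring the doorbell once the deferred count reaches txThreshold. A condensed outline follows; the error labels, return values, and anything else not present in the matched lines are assumptions:

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret < 0)
		goto drop;			/* e.g. tq->stats.drop_hdr_inspect_err */

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		vmxnet3_tq_stop(tq, adapter);	/* back-pressure the stack */
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);	/* header bytes -> data ring */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop;

	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		/* doorbell: hand next2fill to the device for this queue */
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}
	return NETDEV_TX_OK;
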
1995 struct vmxnet3_tx_queue *tq = in vmxnet3_poll_rx_only() local
1997 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_poll_rx_only()
2020 struct vmxnet3_tx_queue *tq = data; in vmxnet3_msix_tx() local
2021 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
2024 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2034 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_msix_tx()
2036 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
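
With MSI-X, each TX queue owns an interrupt vector, so the handler is short: mask this queue's completion-ring interrupt, reap completions, unmask. A sketch assuming the standard irqreturn_t handler signature; the guard on the mask mode and the shared-interrupt case (one vector serving all TX queues) are not part of the matched lines and are assumed:

	static irqreturn_t
	vmxnet3_msix_tx(int irq, void *data)
	{
		struct vmxnet3_tx_queue *tq = data;
		struct vmxnet3_adapter *adapter = tq->adapter;

		if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)	/* assumed guard */
			vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

		/* (shared-interrupt handling omitted) */
		vmxnet3_tq_tx_complete(tq, adapter);

		vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
		return IRQ_HANDLED;
	}
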
2532 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared() local
2535 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2536 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2537 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2538 tqc->ddPA = cpu_to_le64(tq->buf_info_pa); in vmxnet3_setup_driver_shared()
2539 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2540 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2541 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2542 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2546 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2949 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues() local
2950 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
2951 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
2952 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
2953 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
2954 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
2955 tq->stopped = true; in vmxnet3_create_queues()
2956 tq->adapter = adapter; in vmxnet3_create_queues()
2957 tq->qid = i; in vmxnet3_create_queues()
2958 err = vmxnet3_tq_create(tq, adapter); in vmxnet3_create_queues()