Lines matching refs: scrq
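Cross-reference hits for the identifier scrq in the ibmvnic sub-CRQ code. Each entry reads: <source line> <matching code> in <function>; "argument" and "local" mark the lines where scrq is declared as a function parameter or a local variable.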

2839 struct ibmvnic_sub_crq_queue *scrq) in reset_one_sub_crq_queue() argument
2843 if (!scrq) { in reset_one_sub_crq_queue()
2848 if (scrq->irq) { in reset_one_sub_crq_queue()
2849 free_irq(scrq->irq, scrq); in reset_one_sub_crq_queue()
2850 irq_dispose_mapping(scrq->irq); in reset_one_sub_crq_queue()
2851 scrq->irq = 0; in reset_one_sub_crq_queue()
2853 if (scrq->msgs) { in reset_one_sub_crq_queue()
2854 memset(scrq->msgs, 0, 4 * PAGE_SIZE); in reset_one_sub_crq_queue()
2855 atomic_set(&scrq->used, 0); in reset_one_sub_crq_queue()
2856 scrq->cur = 0; in reset_one_sub_crq_queue()
2862 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
2863 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in reset_one_sub_crq_queue()
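
The fragments above cover the whole reset path: quiesce the Linux IRQ, wipe the four-page message ring, reset the ring state, then re-register the queue with the hypervisor. A condensed sketch reconstructed from those fragments (debug prints trimmed; the -EINVAL error code is an assumption):

        static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                           struct ibmvnic_sub_crq_queue *scrq)
        {
                if (!scrq)
                        return -EINVAL;         /* assumed error code */

                /* Quiesce first: release the Linux IRQ and its mapping. */
                if (scrq->irq) {
                        free_irq(scrq->irq, scrq);
                        irq_dispose_mapping(scrq->irq);
                        scrq->irq = 0;
                }

                /* Wipe the 4-page message ring and reset the ring state. */
                if (scrq->msgs) {
                        memset(scrq->msgs, 0, 4 * PAGE_SIZE);
                        atomic_set(&scrq->used, 0);
                        scrq->cur = 0;
                }

                /* Re-register the sub-CRQ with the hypervisor. */
                return h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                                     4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
        }
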
2892 struct ibmvnic_sub_crq_queue *scrq, in release_sub_crq_queue() argument
2905 scrq->crq_num); in release_sub_crq_queue()
2911 scrq->crq_num, rc); in release_sub_crq_queue()
2915 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in release_sub_crq_queue()
2917 free_pages((unsigned long)scrq->msgs, 2); in release_sub_crq_queue()
2918 kfree(scrq); in release_sub_crq_queue()
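
Teardown runs in reverse order of setup: deregister scrq->crq_num with the hypervisor (the H_FREE_SUB_CRQ hcall and its busy-retry loop sit between the fragments above), then unmap the DMA buffer, free the page block, and finally the control structure:

        /* After the H_FREE_SUB_CRQ deregistration of scrq->crq_num
         * succeeds (dev is the vio device, &adapter->vdev->dev): */
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);    /* direction assumed from the map site */
        free_pages((unsigned long)scrq->msgs, 2);       /* order 2 == 4 pages */
        kfree(scrq);
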
2925 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_queue() local
2928 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); in init_sub_crq_queue()
2929 if (!scrq) in init_sub_crq_queue()
2932 scrq->msgs = in init_sub_crq_queue()
2934 if (!scrq->msgs) { in init_sub_crq_queue()
2939 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, in init_sub_crq_queue()
2941 if (dma_mapping_error(dev, scrq->msg_token)) { in init_sub_crq_queue()
2946 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
2947 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in init_sub_crq_queue()
2959 scrq->adapter = adapter; in init_sub_crq_queue()
2960 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); in init_sub_crq_queue()
2961 spin_lock_init(&scrq->lock); in init_sub_crq_queue()
2965 scrq->crq_num, scrq->hw_irq, scrq->irq); in init_sub_crq_queue()
2967 return scrq; in init_sub_crq_queue()
2970 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in init_sub_crq_queue()
2973 free_pages((unsigned long)scrq->msgs, 2); in init_sub_crq_queue()
2975 kfree(scrq); in init_sub_crq_queue()
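
Setup mirrors that teardown: allocate the control structure, grab an order-2 page block for the ring, DMA-map it, then register via h_reg_sub_crq(); each failure unwinds exactly the steps already completed, which is what the dma_unmap_single()/free_pages()/kfree() fragments at the end implement. A sketch of the allocation ladder (the goto labels are assumptions inferred from that unwind order):

        scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
        if (!scrq)
                return NULL;

        /* Order-2: four contiguous zeroed pages for the message ring. */
        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
        if (!scrq->msgs)
                goto zero_page_failed;          /* assumed label */

        scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, scrq->msg_token))
                goto map_failed;                /* assumed label */

The ring capacity then falls out of the mapping size: scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs), i.e. however many sub-CRQ descriptors fit in four pages.
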
3032 struct ibmvnic_sub_crq_queue *scrq) in disable_scrq_irq() argument
3038 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in disable_scrq_irq()
3041 scrq->hw_irq, rc); in disable_scrq_irq()
3046 struct ibmvnic_sub_crq_queue *scrq) in enable_scrq_irq() argument
3051 if (scrq->hw_irq > 0x100000000ULL) { in enable_scrq_irq()
3052 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); in enable_scrq_irq()
3058 u64 val = (0xff000000) | scrq->hw_irq; in enable_scrq_irq()
3070 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in enable_scrq_irq()
3073 scrq->hw_irq, rc); in enable_scrq_irq()
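
Both IRQ helpers go through the H_VIOCTL hypercall, selecting the H_DISABLE_VIO_INTERRUPT or H_ENABLE_VIO_INTERRUPT subfunction for scrq->hw_irq; hw_irq is a hypervisor-side interrupt number, hence the sanity check rejecting values that do not fit in 32 bits. (The 0xff000000 | hw_irq value on line 3058 apparently builds an interrupt-controller EOI word used on a reset path, not the normal enable.) The enable call, per the fragments, presumably via the pseries plpar_hcall_norets() wrapper:

        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
        if (rc)
                dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
                        scrq->hw_irq, rc);      /* message text approximated */
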
3078 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_complete_tx() argument
3088 while (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
3089 unsigned int pool = scrq->pool_index; in ibmvnic_complete_tx()
3099 next = ibmvnic_next_scrq(adapter, scrq); in ibmvnic_complete_tx()
3136 if (atomic_sub_return(num_entries, &scrq->used) <= in ibmvnic_complete_tx()
3139 scrq->pool_index)) { in ibmvnic_complete_tx()
3140 netif_wake_subqueue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
3142 scrq->pool_index); in ibmvnic_complete_tx()
3146 enable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
3148 if (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
3149 disable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
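
ibmvnic_complete_tx() drains completions with pending_scrq()/ibmvnic_next_scrq(), returns buffers to TX pool scrq->pool_index, and wakes the matching subqueue once atomic_sub_return() shows scrq->used has fallen to the wake threshold. The enable/re-check tail closes the classic race where a completion lands between the final poll and unmasking:

        enable_scrq_irq(adapter, scrq);
        if (pending_scrq(adapter, scrq)) {
                /* Work arrived while the IRQ was still masked:
                 * mask again and take another pass. */
                disable_scrq_irq(adapter, scrq);
                goto restart_loop;              /* assumed label */
        }
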
3158 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_tx() local
3159 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx()
3161 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_tx()
3162 ibmvnic_complete_tx(adapter, scrq); in ibmvnic_interrupt_tx()
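
The TX hard-IRQ handler is the thinnest possible wrapper: request_irq() (below) registers the scrq itself as dev_id, so instance casts straight back, and completion work runs inline rather than through NAPI:

        static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
        {
                struct ibmvnic_sub_crq_queue *scrq = instance;
                struct ibmvnic_adapter *adapter = scrq->adapter;

                disable_scrq_irq(adapter, scrq);
                ibmvnic_complete_tx(adapter, scrq);

                return IRQ_HANDLED;             /* assumed return value */
        }
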
3169 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_rx() local
3170 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx()
3178 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
3180 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
3181 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_rx()
3182 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
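
RX, by contrast, follows the standard NAPI pattern: bump the per-queue interrupt counter, then claim the poll context with napi_schedule_prep() before masking the source, so a concurrently running poll can never race the mask:

        adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

        if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
                /* We now own the poll; mask the source and defer the
                 * real work to the driver's NAPI poll routine. */
                disable_scrq_irq(adapter, scrq);
                __napi_schedule(&adapter->napi[scrq->scrq_num]);
        }
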
3191 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_irqs() local
3198 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
3199 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
3201 if (!scrq->irq) { in init_sub_crq_irqs()
3207 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", in init_sub_crq_irqs()
3209 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, in init_sub_crq_irqs()
3210 0, scrq->name, scrq); in init_sub_crq_irqs()
3214 scrq->irq, rc); in init_sub_crq_irqs()
3215 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
3223 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
3224 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
3225 if (!scrq->irq) { in init_sub_crq_irqs()
3230 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", in init_sub_crq_irqs()
3232 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, in init_sub_crq_irqs()
3233 0, scrq->name, scrq); in init_sub_crq_irqs()
3236 scrq->irq, rc); in init_sub_crq_irqs()
3237 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
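
Wiring up interrupts is the same two-step dance for TX and RX queues alike: irq_create_mapping(NULL, scrq->hw_irq) turns the hypervisor interrupt number into a Linux IRQ (the NULL domain selects the default host), then request_irq() passes the scrq as dev_id so the handlers above can recover their queue. One TX iteration, with the error label assumed:

        scrq = adapter->tx_scrq[i];
        scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
        if (!scrq->irq) {
                rc = -EINVAL;                   /* assumed error code */
                goto req_tx_irq_failed;
        }

        snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
                 adapter->vdev->unit_address, i);       /* name args assumed */
        rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, 0, scrq->name, scrq);
        if (rc) {
                irq_dispose_mapping(scrq->irq);
                goto req_tx_irq_failed;
        }
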
3491 struct ibmvnic_sub_crq_queue *scrq) in pending_scrq() argument
3493 union sub_crq *entry = &scrq->msgs[scrq->cur]; in pending_scrq()
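
pending_scrq() is a lockless peek at the next ring slot: an entry is live when the valid/response bit in its first byte is set. A sketch, assuming the IBMVNIC_CRQ_CMD_RSP flag used by the driver's CRQ code:

        static int pending_scrq(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_sub_crq_queue *scrq)
        {
                union sub_crq *entry = &scrq->msgs[scrq->cur];

                /* Lockless check; consumers must order their payload
                 * reads after this valid-bit read. */
                return !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
        }
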
3502 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_next_scrq() argument
3507 spin_lock_irqsave(&scrq->lock, flags); in ibmvnic_next_scrq()
3508 entry = &scrq->msgs[scrq->cur]; in ibmvnic_next_scrq()
3510 if (++scrq->cur == scrq->size) in ibmvnic_next_scrq()
3511 scrq->cur = 0; in ibmvnic_next_scrq()
3515 spin_unlock_irqrestore(&scrq->lock, flags); in ibmvnic_next_scrq()
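
ibmvnic_next_scrq() is the consuming counterpart: under the queue spinlock it re-checks the valid bit, advances scrq->cur with wraparound at scrq->size, and hands the entry back (or NULL if the ring is empty). A sketch reconstructed from the fragments, with the valid-bit test and read barrier filled in as assumptions:

        static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
                                                struct ibmvnic_sub_crq_queue *scrq)
        {
                union sub_crq *entry;
                unsigned long flags;

                spin_lock_irqsave(&scrq->lock, flags);
                entry = &scrq->msgs[scrq->cur];
                if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {       /* assumed test */
                        if (++scrq->cur == scrq->size)
                                scrq->cur = 0;  /* wrap the ring index */
                } else {
                        entry = NULL;           /* nothing pending */
                }
                spin_unlock_irqrestore(&scrq->lock, flags);

                /* Order payload reads after the valid-bit read. */
                dma_rmb();

                return entry;
        }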