Lines matching refs: qi
249 static int vector_advancehead(struct vector_queue *qi, int advance) in vector_advancehead() argument
253 qi->head = in vector_advancehead()
254 (qi->head + advance) in vector_advancehead()
255 % qi->max_depth; in vector_advancehead()
258 spin_lock(&qi->tail_lock); in vector_advancehead()
259 qi->queue_depth -= advance; in vector_advancehead()
265 if (qi->queue_depth == 0) { in vector_advancehead()
266 qi->head = 0; in vector_advancehead()
267 qi->tail = 0; in vector_advancehead()
269 queue_depth = qi->queue_depth; in vector_advancehead()
270 spin_unlock(&qi->tail_lock); in vector_advancehead()
279 static int vector_advancetail(struct vector_queue *qi, int advance) in vector_advancetail() argument
283 qi->tail = in vector_advancetail()
284 (qi->tail + advance) in vector_advancetail()
285 % qi->max_depth; in vector_advancetail()
286 spin_lock(&qi->head_lock); in vector_advancetail()
287 qi->queue_depth += advance; in vector_advancetail()
288 queue_depth = qi->queue_depth; in vector_advancetail()
289 spin_unlock(&qi->head_lock); in vector_advancetail()
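
The fragments above appear to come from the transmit/receive ring of the UML vector network driver (arch/um/drivers/vector_kern.c). Head and tail wrap modulo max_depth, and each advance helper grabs the opposite side's spinlock only while it touches the shared queue_depth counter, so the producer and consumer paths otherwise run under their own lock. Below is a minimal userspace sketch of that split-lock ring pattern using POSIX spinlocks; the ring type and function names are illustrative, not the driver's own.

    #include <pthread.h>

    struct ring {
        int head;                     /* next occupied slot to consume */
        int tail;                     /* next free slot to fill */
        int depth;                    /* occupied slots, shared by both sides */
        int max_depth;                /* ring capacity */
        void **slots;                 /* one buffer pointer per slot */
        pthread_spinlock_t head_lock; /* serialises the consumer side */
        pthread_spinlock_t tail_lock; /* serialises the producer side */
    };

    /* Caller holds head_lock; mirrors vector_advancehead(). */
    int ring_advance_head(struct ring *r, int advance)
    {
        int depth;

        r->head = (r->head + advance) % r->max_depth;

        /* depth is also written by the producer, so take its lock. */
        pthread_spin_lock(&r->tail_lock);
        r->depth -= advance;
        if (r->depth == 0) {
            /* Ring drained: restart both indices from slot 0. */
            r->head = 0;
            r->tail = 0;
        }
        depth = r->depth;
        pthread_spin_unlock(&r->tail_lock);
        return depth;
    }

    /* Caller holds tail_lock; mirrors vector_advancetail(). */
    int ring_advance_tail(struct ring *r, int advance)
    {
        int depth;

        r->tail = (r->tail + advance) % r->max_depth;

        pthread_spin_lock(&r->head_lock);
        r->depth += advance;
        depth = r->depth;
        pthread_spin_unlock(&r->head_lock);
        return depth;
    }
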
334 static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) in vector_enqueue() argument
336 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue()
339 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_enqueue()
342 spin_lock(&qi->tail_lock); in vector_enqueue()
343 spin_lock(&qi->head_lock); in vector_enqueue()
344 queue_depth = qi->queue_depth; in vector_enqueue()
345 spin_unlock(&qi->head_lock); in vector_enqueue()
350 if (queue_depth < qi->max_depth) { in vector_enqueue()
352 *(qi->skbuff_vector + qi->tail) = skb; in vector_enqueue()
353 mmsg_vector += qi->tail; in vector_enqueue()
364 queue_depth = vector_advancetail(qi, 1); in vector_enqueue()
367 spin_unlock(&qi->tail_lock); in vector_enqueue()
370 qi->dev->stats.tx_dropped++; in vector_enqueue()
374 netdev_completed_queue(qi->dev, 1, packet_len); in vector_enqueue()
376 spin_unlock(&qi->tail_lock); in vector_enqueue()
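
vector_enqueue() holds the tail (producer) lock for the whole operation, takes the head lock only long enough to sample the current depth, and then either parks the skb at the tail slot and advances the tail, or drops the packet when the ring is full (bumping tx_dropped and the BQL completion count). A sketch of that decision path, reusing the illustrative struct ring and ring_advance_tail() from the sketch above, with the netdev statistics left out:

    #include <errno.h>

    /* Producer side; mirrors the locking order in vector_enqueue(). */
    int ring_enqueue(struct ring *r, void *buf)
    {
        int depth;

        pthread_spin_lock(&r->tail_lock);

        /* Peek at the shared depth under the consumer's lock. */
        pthread_spin_lock(&r->head_lock);
        depth = r->depth;
        pthread_spin_unlock(&r->head_lock);

        if (depth < r->max_depth) {
            r->slots[r->tail] = buf;
            depth = ring_advance_tail(r, 1); /* takes head_lock itself */
            pthread_spin_unlock(&r->tail_lock);
            return depth;                    /* new queue depth */
        }

        /* Ring full: the caller drops the buffer, as the driver drops the skb. */
        pthread_spin_unlock(&r->tail_lock);
        return -ENOBUFS;
    }
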
380 static int consume_vector_skbs(struct vector_queue *qi, int count) in consume_vector_skbs() argument
386 for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) { in consume_vector_skbs()
387 skb = *(qi->skbuff_vector + skb_index); in consume_vector_skbs()
392 *(qi->skbuff_vector + skb_index) = NULL; in consume_vector_skbs()
395 qi->dev->stats.tx_bytes += bytes_compl; in consume_vector_skbs()
396 qi->dev->stats.tx_packets += count; in consume_vector_skbs()
397 netdev_completed_queue(qi->dev, count, bytes_compl); in consume_vector_skbs()
398 return vector_advancehead(qi, count); in consume_vector_skbs()
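
consume_vector_skbs() walks the skb pointers from the current head for however many messages were actually sent, releases each one, clears its slot, accounts the bytes and packets, and only then moves the head with vector_advancehead(). The run never wraps, because the send length was clamped at the wrap point (see vector_send() below). A sketch of that consume-then-advance step, again reusing the ring type from the first sketch; free() stands in for releasing the skb and the statistics are omitted:

    #include <stdlib.h>

    /* Consumer side; the caller holds head_lock, as vector_send() does. */
    int ring_consume(struct ring *r, int count)
    {
        int i;

        /*
         * Slots [head, head + count) were just handed to the host socket;
         * the range is contiguous because the batch was clamped at the wrap.
         */
        for (i = r->head; i < r->head + count; i++) {
            free(r->slots[i]);   /* stands in for releasing the skb */
            r->slots[i] = NULL;
        }

        return ring_advance_head(r, count);
    }
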
408 static int vector_send(struct vector_queue *qi) in vector_send() argument
410 struct vector_private *vp = netdev_priv(qi->dev); in vector_send()
412 int result = 0, send_len, queue_depth = qi->max_depth; in vector_send()
414 if (spin_trylock(&qi->head_lock)) { in vector_send()
415 if (spin_trylock(&qi->tail_lock)) { in vector_send()
417 queue_depth = qi->queue_depth; in vector_send()
418 spin_unlock(&qi->tail_lock); in vector_send()
422 send_from = qi->mmsg_vector; in vector_send()
423 send_from += qi->head; in vector_send()
425 if (send_len + qi->head > qi->max_depth) in vector_send()
426 send_len = qi->max_depth - qi->head; in vector_send()
452 consume_vector_skbs(qi, result); in vector_send()
462 netif_trans_update(qi->dev); in vector_send()
463 netif_wake_queue(qi->dev); in vector_send()
473 spin_unlock(&qi->head_lock); in vector_send()
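
vector_send() never blocks against the enqueue path: it trylocks the head lock and then trylocks the tail lock just long enough to sample queue_depth. The occupied slots are pushed to the host in one batched call, clamped at the wrap point so the mmsghdr run stays contiguous (send_len = max_depth - head when head + send_len would wrap), after which the sent count is fed to consume_vector_skbs() and the transmit queue is woken (netif_wake_queue()). The mmsg_vector naming suggests a sendmmsg()-style batch on the host side; a self-contained sketch of the clamp-and-send step under that assumption, with fd and the index arguments as placeholders:

    #define _GNU_SOURCE
    #include <sys/socket.h>

    /*
     * Send the occupied region of an mmsghdr ring in one batch without
     * crossing the wrap point. Returns how many messages the kernel accepted
     * (the caller consumes that many slots), or -1 on error.
     */
    int ring_send(int fd, struct mmsghdr *vec, int head, int depth, int max_depth)
    {
        int send_len = depth;

        /* Clamp at the end of the array so the batch stays contiguous. */
        if (send_len + head > max_depth)
            send_len = max_depth - head;

        return sendmmsg(fd, vec + head, send_len, 0);
    }
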
484 static void destroy_queue(struct vector_queue *qi) in destroy_queue() argument
488 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue()
491 if (qi == NULL) in destroy_queue()
496 if (qi->skbuff_vector != NULL) { in destroy_queue()
497 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
498 if (*(qi->skbuff_vector + i) != NULL) in destroy_queue()
499 dev_kfree_skb_any(*(qi->skbuff_vector + i)); in destroy_queue()
501 kfree(qi->skbuff_vector); in destroy_queue()
504 if (qi->mmsg_vector != NULL) { in destroy_queue()
505 mmsg_vector = qi->mmsg_vector; in destroy_queue()
506 for (i = 0; i < qi->max_depth; i++) { in destroy_queue()
516 kfree(qi->mmsg_vector); in destroy_queue()
518 kfree(qi); in destroy_queue()
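
destroy_queue() is straightforward teardown in a fixed order: any skb still parked in the ring is released with dev_kfree_skb_any(), then the skb pointer array, the per-message buffers hanging off the mmsghdr vector, and finally the queue structure itself are freed. A userspace sketch of the same ordering, with free() standing in for the kernel allocators and the iovec handling simplified to one iovec array per message:

    #define _GNU_SOURCE
    #include <stdlib.h>
    #include <sys/socket.h>

    struct sketch_queue {
        int max_depth;
        void **slots;             /* parked buffers (skbs in the driver) */
        struct mmsghdr *mmsg_vec; /* one mmsghdr per ring slot */
    };

    void sketch_destroy_queue(struct sketch_queue *q)
    {
        int i;

        if (q == NULL)
            return;

        if (q->slots != NULL) {
            for (i = 0; i < q->max_depth; i++)
                free(q->slots[i]);                    /* free(NULL) is a no-op */
            free(q->slots);
        }

        if (q->mmsg_vec != NULL) {
            for (i = 0; i < q->max_depth; i++)
                free(q->mmsg_vec[i].msg_hdr.msg_iov); /* per-slot iovec array */
            free(q->mmsg_vec);
        }

        free(q);
    }
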
677 static void prep_queue_for_rx(struct vector_queue *qi) in prep_queue_for_rx() argument
679 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx()
680 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in prep_queue_for_rx()
681 void **skbuff_vector = qi->skbuff_vector; in prep_queue_for_rx()
684 if (qi->queue_depth == 0) in prep_queue_for_rx()
686 for (i = 0; i < qi->queue_depth; i++) { in prep_queue_for_rx()
696 qi->queue_depth = 0; in prep_queue_for_rx()
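
prep_queue_for_rx() re-arms the receive ring: for every slot filled by the previous receive pass (qi->queue_depth of them) it allocates a fresh skb, points the slot's iovec at it, and finally resets queue_depth to 0 to mark the ring empty. A userspace sketch of that refill step, with malloc() standing in for the skb allocation and a single buffer per message; it assumes each msg_hdr.msg_iov already points at a one-entry iovec array set up when the ring was created:

    #define _GNU_SOURCE
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Re-arm the first 'used' slots of an mmsghdr ring with fresh buffers. */
    int ring_prep_for_rx(struct mmsghdr *vec, void **bufs, int used, size_t buf_size)
    {
        int i;

        if (used == 0)
            return 0;

        for (i = 0; i < used; i++) {
            bufs[i] = malloc(buf_size);  /* stands in for the skb allocation */
            if (bufs[i] == NULL)
                return -1;
            vec[i].msg_hdr.msg_iov->iov_base = bufs[i];
            vec[i].msg_hdr.msg_iov->iov_len  = buf_size;
            vec[i].msg_hdr.msg_iovlen        = 1;
        }

        /* The driver then sets qi->queue_depth = 0: the ring is empty again. */
        return 0;
    }
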
962 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx() local
964 struct mmsghdr *mmsg_vector = qi->mmsg_vector; in vector_mmsg_rx()
965 void **skbuff_vector = qi->skbuff_vector; in vector_mmsg_rx()
972 prep_queue_for_rx(qi); in vector_mmsg_rx()
977 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); in vector_mmsg_rx()
990 qi->queue_depth = packet_count; in vector_mmsg_rx()
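
vector_mmsg_rx() is the other half of the receive path: it re-arms the ring via prep_queue_for_rx(), hands the whole mmsghdr vector (up to max_depth messages) to the host in one batched receive on vp->fds->rx_fd, and records how many messages arrived as the new queue_depth. On the host side this maps naturally onto recvmmsg(2); a sketch of one receive pass under that assumption, reusing ring_prep_for_rx() from the refill sketch above:

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/socket.h>

    /* One receive pass: re-arm the used slots, then harvest a batch of packets. */
    int ring_rx_pass(int fd, struct mmsghdr *vec, void **bufs,
                     int *queue_depth, int max_depth, size_t buf_size)
    {
        int packet_count;

        /* Only the slots consumed by the previous pass need new buffers. */
        if (ring_prep_for_rx(vec, bufs, *queue_depth, buf_size) < 0)
            return -1;
        *queue_depth = 0;

        packet_count = recvmmsg(fd, vec, max_depth, 0, NULL);
        if (packet_count > 0)
            *queue_depth = packet_count; /* slots [0, packet_count) hold data */

        return packet_count;
    }
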