Lines matching refs: cfhsi (identifier cross-reference over the Linux CAIF HSI driver; each entry gives the source line number, the matching line, and the enclosing function).
71 struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer); in cfhsi_inactivity_tout() local
73 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_inactivity_tout()
77 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_inactivity_tout()
78 queue_work(cfhsi->wq, &cfhsi->wake_down_work); in cfhsi_inactivity_tout()
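
The cfhsi_inactivity_tout() hits follow the standard timer_list callback pattern: from_timer() recovers the enclosing struct cfhsi from the embedded timer, and the power-down work is queued only while the device is not shutting down. A reconstruction of the whole callback; only the control flow between the listed lines is inferred:

static void cfhsi_inactivity_tout(struct timer_list *t)
{
	/* Recover the driver context from the embedded timer_list. */
	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	/* Schedule the power-down work unless we are shutting down. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}
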
81 static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, in cfhsi_update_aggregation_stats() argument
89 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); in cfhsi_update_aggregation_stats()
90 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); in cfhsi_update_aggregation_stats()
94 cfhsi->aggregation_len += len; in cfhsi_update_aggregation_stats()
96 cfhsi->aggregation_len -= len; in cfhsi_update_aggregation_stats()
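
cfhsi_update_aggregation_stats() accounts queued traffic in on-wire bytes, i.e. including the head and tail alignment padding each frame will carry. A sketch of the full function; the PAD_POW2 definition and the direction/len plumbing are assumptions consistent with the listed lines:

/* Bytes of padding needed to align x up to a power-of-two boundary
 * (assumed definition; the driver's macro should look similar). */
#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
			  ((pow) - ((x) & ((pow) - 1))))

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	/* One extra head byte carries the padding length itself. */
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}
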
99 static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) in cfhsi_can_send_aggregate() argument
103 if (cfhsi->cfg.aggregation_timeout == 0) in cfhsi_can_send_aggregate()
107 if (cfhsi->qhead[i].qlen) in cfhsi_can_send_aggregate()
112 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) in cfhsi_can_send_aggregate()
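
cfhsi_can_send_aggregate() decides whether buffered frames should go out now: aggregation disabled, any queue above best-effort/background priority non-empty, or the BEBK queue holding at least CFHSI_MAX_PKTS frames all force an immediate send. Reconstructed; the loop bounds are inferred from the qhead indexing:

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	/* Aggregation disabled: always send immediately. */
	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	/* Any frame queued above BEBK priority flushes the aggregate. */
	for (i = 0; i < CFHSI_PRIO_BEBK; ++i)
		if (cfhsi->qhead[i].qlen)
			return true;

	/* Too many best-effort/background frames buffered. */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}
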
118 static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) in cfhsi_dequeue() argument
124 skb = skb_dequeue(&cfhsi->qhead[i]); in cfhsi_dequeue()
132 static int cfhsi_tx_queue_len(struct cfhsi *cfhsi) in cfhsi_tx_queue_len() argument
136 len += skb_queue_len(&cfhsi->qhead[i]); in cfhsi_tx_queue_len()
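
cfhsi_dequeue() and cfhsi_tx_queue_len() are the two strict-priority helpers over the qhead[] array: the former pops from the highest-priority non-empty queue, the latter sums all queue lengths. Plausible bodies; the CFHSI_PRIO_LAST bound is an assumption consistent with the indexing above:

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb = NULL;
	int i;

	/* Strict priority: scan queues from highest to lowest. */
	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}
	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}
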
140 static void cfhsi_abort_tx(struct cfhsi *cfhsi) in cfhsi_abort_tx() argument
145 spin_lock_bh(&cfhsi->lock); in cfhsi_abort_tx()
146 skb = cfhsi_dequeue(cfhsi); in cfhsi_abort_tx()
150 cfhsi->ndev->stats.tx_errors++; in cfhsi_abort_tx()
151 cfhsi->ndev->stats.tx_dropped++; in cfhsi_abort_tx()
152 cfhsi_update_aggregation_stats(cfhsi, skb, -1); in cfhsi_abort_tx()
153 spin_unlock_bh(&cfhsi->lock); in cfhsi_abort_tx()
156 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; in cfhsi_abort_tx()
157 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_abort_tx()
158 mod_timer(&cfhsi->inactivity_timer, in cfhsi_abort_tx()
159 jiffies + cfhsi->cfg.inactivity_timeout); in cfhsi_abort_tx()
160 spin_unlock_bh(&cfhsi->lock); in cfhsi_abort_tx()
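
The cfhsi_abort_tx() hits trace a drop-everything drain loop: dequeue under the lock, bump tx_errors/tx_dropped, undo the aggregation accounting, free the skb, repeat; the final spin_unlock_bh() pairs with the lock taken by the iteration that found the queues empty. Sketch; the kfree_skb() call and loop structure are inferred:

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;	/* leave the loop with the lock held */

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			  jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}
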
163 static int cfhsi_flush_fifo(struct cfhsi *cfhsi) in cfhsi_flush_fifo() argument
169 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_flush_fifo()
173 ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, in cfhsi_flush_fifo()
176 netdev_warn(cfhsi->ndev, in cfhsi_flush_fifo()
185 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); in cfhsi_flush_fifo()
186 ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy, in cfhsi_flush_fifo()
187 cfhsi->ops); in cfhsi_flush_fifo()
189 clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); in cfhsi_flush_fifo()
190 netdev_warn(cfhsi->ndev, in cfhsi_flush_fifo()
197 ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, in cfhsi_flush_fifo()
198 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); in cfhsi_flush_fifo()
201 netdev_warn(cfhsi->ndev, in cfhsi_flush_fifo()
207 netdev_warn(cfhsi->ndev, in cfhsi_flush_fifo()
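
cfhsi_flush_fifo() loops until the link FIFO is empty: query the occupancy, issue a throw-away read with CFHSI_FLUSH_FIFO set, then sleep on flush_fifo_wait until the RX-done callback clears the bit (see cfhsi_rx_done_cb() below). A condensed sketch with the netdev_warn() reporting elided; the scratch-buffer size and the 5 * HZ timeout are assumptions:

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32];	/* scratch sink for flushed bytes */
	size_t fifo_occupancy;
	int ret;

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
						       &fifo_occupancy);
		if (ret || !fifo_occupancy)
			break;	/* error, or FIFO already empty */

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
					   cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			break;
		}

		/* Wait for the RX callback to clear CFHSI_FLUSH_FIFO. */
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
				!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits),
				5 * HZ);
		if (ret < 0)
			break;		/* interrupted by a signal */
		if (!ret) {
			ret = -ETIMEDOUT;
			break;
		}
	} while (1);

	return ret;
}
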
217 static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) in cfhsi_tx_frm() argument
224 skb = cfhsi_dequeue(cfhsi); in cfhsi_tx_frm()
240 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); in cfhsi_tx_frm()
241 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); in cfhsi_tx_frm()
251 spin_lock_bh(&cfhsi->lock); in cfhsi_tx_frm()
252 cfhsi->ndev->stats.tx_packets++; in cfhsi_tx_frm()
253 cfhsi->ndev->stats.tx_bytes += skb->len; in cfhsi_tx_frm()
254 cfhsi_update_aggregation_stats(cfhsi, skb, -1); in cfhsi_tx_frm()
255 spin_unlock_bh(&cfhsi->lock); in cfhsi_tx_frm()
273 skb = cfhsi_dequeue(cfhsi); in cfhsi_tx_frm()
281 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); in cfhsi_tx_frm()
282 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); in cfhsi_tx_frm()
292 spin_lock_bh(&cfhsi->lock); in cfhsi_tx_frm()
293 cfhsi->ndev->stats.tx_packets++; in cfhsi_tx_frm()
294 cfhsi->ndev->stats.tx_bytes += skb->len; in cfhsi_tx_frm()
295 cfhsi_update_aggregation_stats(cfhsi, skb, -1); in cfhsi_tx_frm()
296 spin_unlock_bh(&cfhsi->lock); in cfhsi_tx_frm()
322 if (cfhsi_can_send_aggregate(cfhsi)) in cfhsi_tx_frm()
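
cfhsi_tx_frm() shows the same padding arithmetic twice because a TX descriptor carries two kinds of frames: one small CAIF frame embedded in the descriptor itself, and payload frames appended after it. Both paths run essentially this per-frame step; pfrm as the write cursor and the consume_skb() release are inferred:

	/* Compute alignment padding; the first head byte encodes
	 * hpad - 1 so the receiver knows how much padding to skip. */
	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

	*pfrm = (u8)(hpad - 1);
	memcpy(pfrm + hpad, skb->data, skb->len);

	/* Account the transmitted frame and undo its aggregation bytes. */
	spin_lock_bh(&cfhsi->lock);
	cfhsi->ndev->stats.tx_packets++;
	cfhsi->ndev->stats.tx_bytes += skb->len;
	cfhsi_update_aggregation_stats(cfhsi, skb, -1);
	spin_unlock_bh(&cfhsi->lock);
	consume_skb(skb);
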
330 static void cfhsi_start_tx(struct cfhsi *cfhsi) in cfhsi_start_tx() argument
332 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; in cfhsi_start_tx()
335 netdev_dbg(cfhsi->ndev, "%s.\n", __func__); in cfhsi_start_tx()
337 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_start_tx()
342 len = cfhsi_tx_frm(desc, cfhsi); in cfhsi_start_tx()
344 spin_lock_bh(&cfhsi->lock); in cfhsi_start_tx()
345 if (unlikely(cfhsi_tx_queue_len(cfhsi))) { in cfhsi_start_tx()
346 spin_unlock_bh(&cfhsi->lock); in cfhsi_start_tx()
350 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; in cfhsi_start_tx()
352 mod_timer(&cfhsi->inactivity_timer, in cfhsi_start_tx()
353 jiffies + cfhsi->cfg.inactivity_timeout); in cfhsi_start_tx()
354 spin_unlock_bh(&cfhsi->lock); in cfhsi_start_tx()
359 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); in cfhsi_start_tx()
361 netdev_err(cfhsi->ndev, "%s: TX error %d.\n", in cfhsi_start_tx()
366 static void cfhsi_tx_done(struct cfhsi *cfhsi) in cfhsi_tx_done() argument
368 netdev_dbg(cfhsi->ndev, "%s.\n", __func__); in cfhsi_tx_done()
370 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_tx_done()
377 spin_lock_bh(&cfhsi->lock); in cfhsi_tx_done()
378 if (cfhsi->flow_off_sent && in cfhsi_tx_done()
379 cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark && in cfhsi_tx_done()
380 cfhsi->cfdev.flowctrl) { in cfhsi_tx_done()
382 cfhsi->flow_off_sent = 0; in cfhsi_tx_done()
383 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); in cfhsi_tx_done()
386 if (cfhsi_can_send_aggregate(cfhsi)) { in cfhsi_tx_done()
387 spin_unlock_bh(&cfhsi->lock); in cfhsi_tx_done()
388 cfhsi_start_tx(cfhsi); in cfhsi_tx_done()
390 mod_timer(&cfhsi->aggregation_timer, in cfhsi_tx_done()
391 jiffies + cfhsi->cfg.aggregation_timeout); in cfhsi_tx_done()
392 spin_unlock_bh(&cfhsi->lock); in cfhsi_tx_done()
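
cfhsi_tx_done() is where the queues drain, so it is also where flow control re-opens: once a previously signalled flow-off coincides with the total queue length falling to q_low_mark, flowctrl(ndev, ON) is sent; then either more aggregated data goes out immediately or the aggregation timer is re-armed. The flow-control tail, as the hits suggest:

	spin_lock_bh(&cfhsi->lock);

	/* Re-open flow if we had closed it and are below the low mark. */
	if (cfhsi->flow_off_sent &&
	    cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
	    cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			  jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}
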
400 struct cfhsi *cfhsi; in cfhsi_tx_done_cb() local
402 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); in cfhsi_tx_done_cb()
403 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_tx_done_cb()
406 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_tx_done_cb()
408 cfhsi_tx_done(cfhsi); in cfhsi_tx_done_cb()
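
All the device callbacks here (tx_done, rx_done, wake_up, wake_down) follow the same container_of() idiom: the driver embeds a struct cfhsi_cb_ops inside struct cfhsi, hands its address to the HSI layer, and each callback walks back from the member pointer to the full driver context. cfhsi_tx_done_cb() in full, per the hits:

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	/* cb_ops is embedded in struct cfhsi; recover the container. */
	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}
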
411 static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) in cfhsi_rx_desc() argument
420 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", in cfhsi_rx_desc()
441 netdev_err(cfhsi->ndev, "%s: Invalid length.\n", in cfhsi_rx_desc()
449 netdev_err(cfhsi->ndev, "%s: Out of memory !\n", in cfhsi_rx_desc()
459 skb->dev = cfhsi->ndev; in cfhsi_rx_desc()
464 cfhsi->ndev->stats.rx_packets++; in cfhsi_rx_desc()
465 cfhsi->ndev->stats.rx_bytes += len; in cfhsi_rx_desc()
481 netdev_err(cfhsi->ndev, in cfhsi_rx_desc()
518 static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) in cfhsi_rx_pld() argument
528 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", in cfhsi_rx_pld()
538 while (nfrms < cfhsi->rx_state.nfrms) { in cfhsi_rx_pld()
561 netdev_err(cfhsi->ndev, "%s: Invalid length.\n", in cfhsi_rx_pld()
569 netdev_err(cfhsi->ndev, "%s: Out of memory !\n", in cfhsi_rx_pld()
571 cfhsi->rx_state.nfrms = nfrms; in cfhsi_rx_pld()
580 skb->dev = cfhsi->ndev; in cfhsi_rx_pld()
585 cfhsi->ndev->stats.rx_packets++; in cfhsi_rx_pld()
586 cfhsi->ndev->stats.rx_bytes += len; in cfhsi_rx_pld()
597 static void cfhsi_rx_done(struct cfhsi *cfhsi) in cfhsi_rx_done() argument
605 desc = (struct cfhsi_desc *)cfhsi->rx_buf; in cfhsi_rx_done()
607 netdev_dbg(cfhsi->ndev, "%s\n", __func__); in cfhsi_rx_done()
609 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_rx_done()
613 spin_lock_bh(&cfhsi->lock); in cfhsi_rx_done()
614 mod_timer_pending(&cfhsi->inactivity_timer, in cfhsi_rx_done()
615 jiffies + cfhsi->cfg.inactivity_timeout); in cfhsi_rx_done()
616 spin_unlock_bh(&cfhsi->lock); in cfhsi_rx_done()
618 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { in cfhsi_rx_done()
624 rx_buf = cfhsi->rx_buf; in cfhsi_rx_done()
629 rx_buf = cfhsi->rx_flip_buf; in cfhsi_rx_done()
631 rx_buf = cfhsi->rx_flip_buf; in cfhsi_rx_done()
634 if (cfhsi->rx_state.pld_len > 0 && in cfhsi_rx_done()
639 cfhsi->rx_state.pld_len); in cfhsi_rx_done()
641 cfhsi->rx_state.piggy_desc = true; in cfhsi_rx_done()
673 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { in cfhsi_rx_done()
675 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", in cfhsi_rx_done()
678 res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len, in cfhsi_rx_done()
679 cfhsi->ops); in cfhsi_rx_done()
681 netdev_err(cfhsi->ndev, "%s: RX error %d.\n", in cfhsi_rx_done()
683 cfhsi->ndev->stats.rx_errors++; in cfhsi_rx_done()
684 cfhsi->ndev->stats.rx_dropped++; in cfhsi_rx_done()
688 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { in cfhsi_rx_done()
690 if (cfhsi_rx_desc(desc, cfhsi) < 0) in cfhsi_rx_done()
694 if (cfhsi_rx_pld(desc, cfhsi) < 0) in cfhsi_rx_done()
698 if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0) in cfhsi_rx_done()
706 memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state)); in cfhsi_rx_done()
707 cfhsi->rx_state.state = rx_state; in cfhsi_rx_done()
708 cfhsi->rx_ptr = rx_ptr; in cfhsi_rx_done()
709 cfhsi->rx_len = rx_len; in cfhsi_rx_done()
710 cfhsi->rx_state.pld_len = desc_pld_len; in cfhsi_rx_done()
711 cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC; in cfhsi_rx_done()
713 if (rx_buf != cfhsi->rx_buf) in cfhsi_rx_done()
714 swap(cfhsi->rx_buf, cfhsi->rx_flip_buf); in cfhsi_rx_done()
718 netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__); in cfhsi_rx_done()
720 cfhsi->rx_buf, CFHSI_DESC_SZ); in cfhsi_rx_done()
721 schedule_work(&cfhsi->out_of_sync_work); in cfhsi_rx_done()
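
The tail of cfhsi_rx_done() commits the parsed state and flips the double buffer: the next hardware transfer was already started into rx_buf or the flip buffer, and swap() exchanges the two when the flip buffer was used; parse failures fall through to the out-of-sync path. Roughly, with the out_of_sync label name assumed:

	/* Commit the state for the next transfer... */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	/* ...and swap the double buffers if the flip buffer was filled. */
	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			     cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
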
726 struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer); in cfhsi_rx_slowpath() local
728 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_rx_slowpath()
731 cfhsi_rx_done(cfhsi); in cfhsi_rx_slowpath()
736 struct cfhsi *cfhsi; in cfhsi_rx_done_cb() local
738 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); in cfhsi_rx_done_cb()
739 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_rx_done_cb()
742 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_rx_done_cb()
745 if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) in cfhsi_rx_done_cb()
746 wake_up_interruptible(&cfhsi->flush_fifo_wait); in cfhsi_rx_done_cb()
748 cfhsi_rx_done(cfhsi); in cfhsi_rx_done_cb()
753 struct cfhsi *cfhsi = NULL; in cfhsi_wake_up() local
758 cfhsi = container_of(work, struct cfhsi, wake_up_work); in cfhsi_wake_up()
760 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_wake_up()
763 if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) { in cfhsi_wake_up()
766 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); in cfhsi_wake_up()
767 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); in cfhsi_wake_up()
772 cfhsi->ops->cfhsi_wake_up(cfhsi->ops); in cfhsi_wake_up()
774 netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", in cfhsi_wake_up()
779 ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait, in cfhsi_wake_up()
781 &cfhsi->bits), ret); in cfhsi_wake_up()
784 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", in cfhsi_wake_up()
787 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); in cfhsi_wake_up()
788 cfhsi->ops->cfhsi_wake_down(cfhsi->ops); in cfhsi_wake_up()
795 netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", in cfhsi_wake_up()
799 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, in cfhsi_wake_up()
802 netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n", in cfhsi_wake_up()
806 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, in cfhsi_wake_up()
810 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", in cfhsi_wake_up()
814 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); in cfhsi_wake_up()
820 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); in cfhsi_wake_up()
821 cfhsi->ops->cfhsi_wake_down(cfhsi->ops); in cfhsi_wake_up()
825 netdev_dbg(cfhsi->ndev, "%s: Woken.\n", in cfhsi_wake_up()
829 set_bit(CFHSI_AWAKE, &cfhsi->bits); in cfhsi_wake_up()
830 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); in cfhsi_wake_up()
833 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); in cfhsi_wake_up()
834 res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops); in cfhsi_wake_up()
837 netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res); in cfhsi_wake_up()
840 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); in cfhsi_wake_up()
842 spin_lock_bh(&cfhsi->lock); in cfhsi_wake_up()
845 if (!cfhsi_tx_queue_len(cfhsi)) { in cfhsi_wake_up()
846 netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n", in cfhsi_wake_up()
849 mod_timer(&cfhsi->inactivity_timer, in cfhsi_wake_up()
850 jiffies + cfhsi->cfg.inactivity_timeout); in cfhsi_wake_up()
851 spin_unlock_bh(&cfhsi->lock); in cfhsi_wake_up()
855 netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", in cfhsi_wake_up()
858 spin_unlock_bh(&cfhsi->lock); in cfhsi_wake_up()
861 len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi); in cfhsi_wake_up()
865 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); in cfhsi_wake_up()
867 netdev_err(cfhsi->ndev, "%s: TX error %d.\n", in cfhsi_wake_up()
869 cfhsi_abort_tx(cfhsi); in cfhsi_wake_up()
872 netdev_err(cfhsi->ndev, in cfhsi_wake_up()
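
cfhsi_wake_up() implements the power-state handshake: assert the wake line, then sleep on wake_up_wait until cfhsi_wake_up_cb() sets CFHSI_WAKE_UP_ACK, with separate branches for a signal (back off and de-assert) and a timeout (probe FIFO occupancy and the peer wake line as a fallback). The core wait, condensed; CFHSI_WAKE_TOUT as the timeout constant is an assumption:

	/* Assert the wake line towards the peer. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	/* Sleep until cfhsi_wake_up_cb() reports the acknowledge. */
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
			test_and_clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits),
			CFHSI_WAKE_TOUT);
	if (ret < 0) {
		/* Interrupted by a signal: undo the wake request. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			   __func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
	if (ret == 0) {
		/* Timeout: probe FIFO occupancy / peer wake to decide
		 * whether the ack was merely lost (elided here). */
	}

	/* Acknowledged (or recovered): mark awake and restart RX. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
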
881 struct cfhsi *cfhsi = NULL; in cfhsi_wake_down() local
885 cfhsi = container_of(work, struct cfhsi, wake_down_work); in cfhsi_wake_down()
886 netdev_dbg(cfhsi->ndev, "%s.\n", __func__); in cfhsi_wake_down()
888 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_wake_down()
892 cfhsi->ops->cfhsi_wake_down(cfhsi->ops); in cfhsi_wake_down()
896 ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, in cfhsi_wake_down()
898 &cfhsi->bits), ret); in cfhsi_wake_down()
901 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", in cfhsi_wake_down()
908 netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__); in cfhsi_wake_down()
911 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, in cfhsi_wake_down()
914 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", in cfhsi_wake_down()
920 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, in cfhsi_wake_down()
932 netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__); in cfhsi_wake_down()
935 clear_bit(CFHSI_AWAKE, &cfhsi->bits); in cfhsi_wake_down()
938 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); in cfhsi_wake_down()
943 struct cfhsi *cfhsi = NULL; in cfhsi_out_of_sync() local
945 cfhsi = container_of(work, struct cfhsi, out_of_sync_work); in cfhsi_out_of_sync()
948 dev_close(cfhsi->ndev); in cfhsi_out_of_sync()
954 struct cfhsi *cfhsi = NULL; in cfhsi_wake_up_cb() local
956 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); in cfhsi_wake_up_cb()
957 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_wake_up_cb()
960 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); in cfhsi_wake_up_cb()
961 wake_up_interruptible(&cfhsi->wake_up_wait); in cfhsi_wake_up_cb()
963 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) in cfhsi_wake_up_cb()
967 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) in cfhsi_wake_up_cb()
968 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_wake_up_cb()
973 struct cfhsi *cfhsi = NULL; in cfhsi_wake_down_cb() local
975 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); in cfhsi_wake_down_cb()
976 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_wake_down_cb()
980 set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); in cfhsi_wake_down_cb()
981 wake_up_interruptible(&cfhsi->wake_down_wait); in cfhsi_wake_down_cb()
986 struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer); in cfhsi_aggregation_tout() local
988 netdev_dbg(cfhsi->ndev, "%s.\n", in cfhsi_aggregation_tout()
991 cfhsi_start_tx(cfhsi); in cfhsi_aggregation_tout()
996 struct cfhsi *cfhsi = NULL; in cfhsi_xmit() local
1004 cfhsi = netdev_priv(dev); in cfhsi_xmit()
1024 spin_lock_bh(&cfhsi->lock); in cfhsi_xmit()
1027 cfhsi_update_aggregation_stats(cfhsi, skb, 1); in cfhsi_xmit()
1030 skb_queue_tail(&cfhsi->qhead[prio], skb); in cfhsi_xmit()
1033 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { in cfhsi_xmit()
1034 spin_unlock_bh(&cfhsi->lock); in cfhsi_xmit()
1035 cfhsi_abort_tx(cfhsi); in cfhsi_xmit()
1040 if (!cfhsi->flow_off_sent && in cfhsi_xmit()
1041 cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark && in cfhsi_xmit()
1042 cfhsi->cfdev.flowctrl) { in cfhsi_xmit()
1043 cfhsi->flow_off_sent = 1; in cfhsi_xmit()
1044 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); in cfhsi_xmit()
1047 if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) { in cfhsi_xmit()
1048 cfhsi->tx_state = CFHSI_TX_STATE_XFER; in cfhsi_xmit()
1055 cfhsi_can_send_aggregate(cfhsi) && in cfhsi_xmit()
1056 del_timer(&cfhsi->aggregation_timer) > 0; in cfhsi_xmit()
1057 spin_unlock_bh(&cfhsi->lock); in cfhsi_xmit()
1059 cfhsi_start_tx(cfhsi); in cfhsi_xmit()
1064 timer_active = del_timer_sync(&cfhsi->inactivity_timer); in cfhsi_xmit()
1066 spin_unlock_bh(&cfhsi->lock); in cfhsi_xmit()
1069 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; in cfhsi_xmit()
1074 len = cfhsi_tx_frm(desc, cfhsi); in cfhsi_xmit()
1078 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); in cfhsi_xmit()
1080 netdev_err(cfhsi->ndev, "%s: TX error %d.\n", in cfhsi_xmit()
1082 cfhsi_abort_tx(cfhsi); in cfhsi_xmit()
1086 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) in cfhsi_xmit()
1087 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_xmit()
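
cfhsi_xmit() mirrors cfhsi_tx_done() on the other watermark: queue the skb by priority, signal flow-off once the total backlog passes q_high_mark, and only kick the hardware when the TX state machine is idle; otherwise the frame simply waits for the ongoing transfer's tx-done path. The enqueue-and-throttle step, per the hits:

	spin_lock_bh(&cfhsi->lock);

	/* Account the padded length and queue by priority. */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Close flow once the backlog exceeds the high watermark. */
	if (!cfhsi->flow_off_sent &&
	    cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
	    cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}
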
1098 struct cfhsi *cfhsi = netdev_priv(dev); in cfhsi_setup() local
1107 skb_queue_head_init(&cfhsi->qhead[i]); in cfhsi_setup()
1108 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; in cfhsi_setup()
1109 cfhsi->cfdev.use_frag = false; in cfhsi_setup()
1110 cfhsi->cfdev.use_stx = false; in cfhsi_setup()
1111 cfhsi->cfdev.use_fcs = false; in cfhsi_setup()
1112 cfhsi->ndev = dev; in cfhsi_setup()
1113 cfhsi->cfg = hsi_default_config; in cfhsi_setup()
1118 struct cfhsi *cfhsi = netdev_priv(ndev); in cfhsi_open() local
1121 clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits); in cfhsi_open()
1124 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; in cfhsi_open()
1125 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; in cfhsi_open()
1128 cfhsi->flow_off_sent = 0; in cfhsi_open()
1134 cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL); in cfhsi_open()
1135 if (!cfhsi->tx_buf) { in cfhsi_open()
1144 cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); in cfhsi_open()
1145 if (!cfhsi->rx_buf) { in cfhsi_open()
1150 cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); in cfhsi_open()
1151 if (!cfhsi->rx_flip_buf) { in cfhsi_open()
1157 cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout; in cfhsi_open()
1160 cfhsi->rx_ptr = cfhsi->rx_buf; in cfhsi_open()
1161 cfhsi->rx_len = CFHSI_DESC_SZ; in cfhsi_open()
1164 spin_lock_init(&cfhsi->lock); in cfhsi_open()
1167 cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb; in cfhsi_open()
1168 cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb; in cfhsi_open()
1169 cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb; in cfhsi_open()
1170 cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb; in cfhsi_open()
1173 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); in cfhsi_open()
1174 INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); in cfhsi_open()
1175 INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync); in cfhsi_open()
1178 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); in cfhsi_open()
1179 clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); in cfhsi_open()
1180 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); in cfhsi_open()
1181 clear_bit(CFHSI_AWAKE, &cfhsi->bits); in cfhsi_open()
1184 cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM); in cfhsi_open()
1185 if (!cfhsi->wq) { in cfhsi_open()
1186 netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", in cfhsi_open()
1193 init_waitqueue_head(&cfhsi->wake_up_wait); in cfhsi_open()
1194 init_waitqueue_head(&cfhsi->wake_down_wait); in cfhsi_open()
1195 init_waitqueue_head(&cfhsi->flush_fifo_wait); in cfhsi_open()
1198 timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0); in cfhsi_open()
1200 timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0); in cfhsi_open()
1202 timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0); in cfhsi_open()
1205 res = cfhsi->ops->cfhsi_up(cfhsi->ops); in cfhsi_open()
1207 netdev_err(cfhsi->ndev, in cfhsi_open()
1214 res = cfhsi_flush_fifo(cfhsi); in cfhsi_open()
1216 netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n", in cfhsi_open()
1223 cfhsi->ops->cfhsi_down(cfhsi->ops); in cfhsi_open()
1225 destroy_workqueue(cfhsi->wq); in cfhsi_open()
1227 kfree(cfhsi->rx_flip_buf); in cfhsi_open()
1229 kfree(cfhsi->rx_buf); in cfhsi_open()
1231 kfree(cfhsi->tx_buf); in cfhsi_open()
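
cfhsi_open() unwinds in reverse allocation order on failure, the usual kernel goto ladder. The label names below are hypothetical, but the teardown sequence matches the hits: device down, destroy the workqueue, then free the flip, RX, and TX buffers:

 err_activate:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_create_wq:
	destroy_workqueue(cfhsi->wq);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx:
	kfree(cfhsi->rx_buf);
 err_alloc_tx:
	kfree(cfhsi->tx_buf);

	return res;
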
1238 struct cfhsi *cfhsi = netdev_priv(ndev); in cfhsi_close() local
1242 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); in cfhsi_close()
1245 del_timer_sync(&cfhsi->inactivity_timer); in cfhsi_close()
1246 del_timer_sync(&cfhsi->rx_slowpath_timer); in cfhsi_close()
1247 del_timer_sync(&cfhsi->aggregation_timer); in cfhsi_close()
1250 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); in cfhsi_close()
1253 destroy_workqueue(cfhsi->wq); in cfhsi_close()
1256 tx_buf = cfhsi->tx_buf; in cfhsi_close()
1257 rx_buf = cfhsi->rx_buf; in cfhsi_close()
1258 flip_buf = cfhsi->rx_flip_buf; in cfhsi_close()
1260 cfhsi_abort_tx(cfhsi); in cfhsi_close()
1263 cfhsi->ops->cfhsi_down(cfhsi->ops); in cfhsi_close()
1274 struct cfhsi *cfhsi = netdev_priv(dev); in cfhsi_uninit() local
1277 list_del(&cfhsi->list); in cfhsi_uninit()
1287 static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi) in cfhsi_netlink_parms() argument
1304 cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000; in cfhsi_netlink_parms()
1305 if (cfhsi->cfg.inactivity_timeout == 0) in cfhsi_netlink_parms()
1306 cfhsi->cfg.inactivity_timeout = 1; in cfhsi_netlink_parms()
1307 else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA) in cfhsi_netlink_parms()
1308 cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA; in cfhsi_netlink_parms()
1313 cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]); in cfhsi_netlink_parms()
1317 cfhsi->cfg.head_align = nla_get_u32(data[i]); in cfhsi_netlink_parms()
1321 cfhsi->cfg.tail_align = nla_get_u32(data[i]); in cfhsi_netlink_parms()
1325 cfhsi->cfg.q_high_mark = nla_get_u32(data[i]); in cfhsi_netlink_parms()
1329 cfhsi->cfg.q_low_mark = nla_get_u32(data[i]); in cfhsi_netlink_parms()
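
cfhsi_netlink_parms() copies each present attribute into cfhsi->cfg with nla_get_u32(); the inactivity timeout is additionally converted from milliseconds to jiffies and clamped to [1, NEXT_TIMER_MAX_DELTA]. The timeout branch, roughly; the attribute enum name is an assumption:

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	if (data[i]) {
		u32 inactivity_timeout = nla_get_u32(data[i]);

		/* Convert ms to jiffies and clamp to the timer's range. */
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
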
1361 struct cfhsi *cfhsi = netdev_priv(dev); in caif_hsi_fill_info() local
1364 cfhsi->cfg.inactivity_timeout) || in caif_hsi_fill_info()
1366 cfhsi->cfg.aggregation_timeout) || in caif_hsi_fill_info()
1368 cfhsi->cfg.head_align) || in caif_hsi_fill_info()
1370 cfhsi->cfg.tail_align) || in caif_hsi_fill_info()
1372 cfhsi->cfg.q_high_mark) || in caif_hsi_fill_info()
1374 cfhsi->cfg.q_low_mark)) in caif_hsi_fill_info()
1384 struct cfhsi *cfhsi = NULL; in caif_hsi_newlink() local
1389 cfhsi = netdev_priv(dev); in caif_hsi_newlink()
1390 cfhsi_netlink_parms(data, cfhsi); in caif_hsi_newlink()
1399 cfhsi->ops = (*get_ops)(); in caif_hsi_newlink()
1400 if (!cfhsi->ops) { in caif_hsi_newlink()
1406 cfhsi->ops->cb_ops = &cfhsi->cb_ops; in caif_hsi_newlink()
1412 list_add_tail(&cfhsi->list, &cfhsi_list); in caif_hsi_newlink()
1422 .priv_size = sizeof(struct cfhsi),
1436 struct cfhsi *cfhsi; in cfhsi_exit_module() local
1442 cfhsi = list_entry(list_node, struct cfhsi, list); in cfhsi_exit_module()
1443 unregister_netdevice(cfhsi->ndev); in cfhsi_exit_module()