// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

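/* Prepare an Rx buffer page for the given ring slot: reuse the currently
 * mapped page when possible (flipping to the next AQ_CFG_RX_FRAME_MAX chunk,
 * or rewinding to offset 0 when the ring holds the only reference),
 * otherwise release it and allocate a fresh DMA-mapped page.
 */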
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_flips++;
				u64_stats_update_end(&self->stats.rx.syncp);
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_losts++;
				u64_stats_update_end(&self->stats.rx.syncp);
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.pg_reuses++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		if (ret) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
		return ret;
	}

	return 0;
}

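/* Common allocation helper for Tx and Rx rings: the software buffer
 * descriptor array is kcalloc'ed, while the hardware descriptor ring
 * itself lives in DMA-coherent memory.
 */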
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

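/* Allocate a descriptor-only ring used by the PTP path for hardware
 * timestamps: no buff_ring is needed, and the coherent descriptor area is
 * sized with AQ_CFG_RXDS_DEF extra bytes of tail room beyond the
 * descriptors themselves.
 */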
struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	if (self->ring_type == ATL_RING_RX)
		u64_stats_init(&self->stats.rx.syncp);
	else
		u64_stats_init(&self->stats.tx.syncp);

	return 0;
}

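/* Return true when descriptor index @i lies strictly between head @h and
 * tail @t in ring order, taking wrap-around into account.
 */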
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

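/* Stop the Tx queue when too few descriptors remain to hold a maximally
 * fragmented skb, and wake it again only once more than
 * AQ_CFG_RESTART_DESC_THRES descriptors are free, which gives the
 * stop/wake logic some hysteresis.
 */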
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		u64_stats_update_begin(&ring->stats.tx.syncp);
		ring->stats.tx.queue_restarts++;
		u64_stats_update_end(&ring->stats.tx.syncp);
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

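/* Reclaim completed Tx descriptors between sw_head and hw_head: unmap the
 * DMA buffers, and on the last (EOP) fragment account the packet and free
 * its skb. Returns true unless the clean budget was exhausted.
 */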
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop)) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;
			u64_stats_update_end(&self->stats.tx.syncp);

			dev_kfree_skb_any(buff->skb);
		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

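/* Propagate the hardware checksum offload result to the skb: a checksum
 * error is counted and reported as CHECKSUM_NONE, otherwise the
 * CHECKSUM_UNNECESSARY level is bumped once for a valid IP header checksum
 * and once more for a valid TCP/UDP checksum.
 */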
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.errors;
		u64_stats_update_end(&self->stats.rx.syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
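/* NAPI Rx completion: for each completed descriptor chain, validate the
 * multi-descriptor (RSC) fragments, copy the packet header into a freshly
 * allocated skb, attach the remaining data as page fragments, fill in the
 * offload metadata and hand the skb to GRO.
 */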
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			unsigned int frag_cnt = 0U;

			buff_ = buff;
			do {
				bool is_rsc_completed = true;

				if (buff_->next >= self->size) {
					err = -EIO;
					goto err_exit;
				}

				frag_cnt++;
				next_ = buff_->next,
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed) ||
				    frag_cnt > MAX_SKB_FRAGS) {
					err = 0;
					goto err_exit;
				}

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= self->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&self->stats.rx.syncp);
				++self->stats.rx.errors;
				u64_stats_update_end(&self->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			++self->stats.rx.errors;
			u64_stats_update_end(&self->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.skb_alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
			err = -ENOMEM;
			goto err_exit;
		}
		if (is_ptp_ring)
			buff->len -=
				aq_ptp_extract_ts(self->aq_nic, skb,
						  aq_buf_vaddr(&buff->rxdata),
						  buff->len);

		hdr_len = buff->len;
		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
			hdr_len = eth_get_headlen(skb->dev,
						  aq_buf_vaddr(&buff->rxdata),
						  AQ_CFG_RX_HDR_SIZE);

		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
		       ALIGN(hdr_len, sizeof(long)));

		if (buff->len - hdr_len > 0) {
			skb_add_rx_frag(skb, i++, buff->rxdata.page,
					buff->rxdata.pg_off + hdr_len,
					buff->len - hdr_len,
					AQ_CFG_RX_FRAME_MAX);
			page_ref_inc(buff->rxdata.page);
		}

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];

				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
							      buff_->rxdata.daddr,
							      buff_->rxdata.pg_off,
							      buff_->len,
							      DMA_FROM_DEVICE);
				skb_add_rx_frag(skb, i++,
						buff_->rxdata.page,
						buff_->rxdata.pg_off,
						buff_->len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff_->rxdata.page);
				buff_->is_cleaned = 1;

				buff->is_ip_cso &= buff_->is_ip_cso;
				buff->is_udp_cso &= buff_->is_udp_cso;
				buff->is_tcp_cso &= buff_->is_tcp_cso;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
		u64_stats_update_end(&self->stats.rx.syncp);

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

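/* Drain the hardware timestamp ring: extract the raw timestamp from each
 * completed descriptor and pass it to the PTP layer for Tx timestamping.
 */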
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

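/* Refill the Rx ring with receive buffers, but only once at least
 * AQ_CFG_RX_REFILL_THRES descriptors (capped at half the ring) are free,
 * so that refills are batched rather than performed per packet.
 */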
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}

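/* Copy the per-ring counters into the ethtool statistics buffer. The
 * u64_stats begin/retry loop rereads the snapshot if a writer updated the
 * counters concurrently; the return value is the number of u64 entries
 * written.
 */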
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count;
	unsigned int start;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
			data[count] = self->stats.rx.packets;
			data[++count] = self->stats.rx.jumbo_packets;
			data[++count] = self->stats.rx.lro_packets;
			data[++count] = self->stats.rx.errors;
			data[++count] = self->stats.rx.alloc_fails;
			data[++count] = self->stats.rx.skb_alloc_fails;
			data[++count] = self->stats.rx.polls;
		} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
			data[count] = self->stats.tx.packets;
			data[++count] = self->stats.tx.queue_restarts;
		} while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
	}

	return ++count;
}