// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

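/* Translate an I/O virtual address from a frame descriptor into a CPU
 * virtual address. When no IOMMU domain is attached to the device, the
 * IOVA is already a physical address and is used directly.
 */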
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

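/* Propagate the hardware Rx checksum validation result to the stack.
 * The frame annotation status bits are only trusted when the netdev
 * has NETIF_F_RXCSUM enabled and both L3 and L4 were validated.
 */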
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non-linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
				       !dpaa2_sg_is_final(&sgt[i]))
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

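/* Recycle a buffer whose frame was consumed by XDP. Buffers are batched
 * and released back to the hardware buffer pool DPAA2_ETH_BUFS_PER_CMD
 * at a time; if the software portal stays busy past the retry limit,
 * the batch is freed to the kernel instead.
 */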
static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

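/* Enqueue a batch of XDP frame descriptors to a Tx frame queue, retrying
 * on portal busy up to DPAA2_ETH_ENQUEUE_RETRIES times per frame.
 * Returns the number of FDs actually enqueued, which may be fewer than
 * requested.
 */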
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

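/* Flush the pending XDP_TX frames on a Tx queue and update the counters:
 * frames that could not be enqueued have their buffers recycled to the
 * pool and are accounted as Tx errors.
 */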
static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* enqueue the array of XDP_TX frames */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

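/* Prepare an XDP_TX frame for transmission on the Tx queue matching the
 * Rx queue it arrived on. Frames are staged in a per-queue array and
 * flushed in bulk once DEV_MAP_BULK_SIZE descriptors have accumulated.
 */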
static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

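/* Run the XDP program attached to the Rx channel, if any, and act on the
 * verdict. Returns the XDP action so the caller can tell whether the
 * frame was consumed (anything but XDP_PASS) or should continue along
 * the regular Rx path.
 */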
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
		(dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				ch->buf_count++;
				dpaa2_eth_xdp_release_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

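/* Classify a PTP event packet and extract the fields needed for hardware
 * timestamp insertion: message type, the two-step flag, whether the
 * transport is UDP, and the offsets of the correctionField and
 * originTimestamp relative to the MAC header.
 */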
static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* Enable the UPD (update prepended data) bit in the FAEAD field of
	 * the hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != 0 || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

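/* Common Tx routine: build a frame descriptor from the skb (linear,
 * scatter/gather, or single-buffer SG when headroom is insufficient),
 * select the Tx queue and enqueue, retrying on portal busy. On failure
 * the FD is torn down and the skb freed.
 */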
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

1157*4882a593Smuzhiyun /* Everything that happens after this enqueues might race with
1158*4882a593Smuzhiyun * the Tx confirmation callback for this frame
1159*4882a593Smuzhiyun */
1160*4882a593Smuzhiyun for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1161*4882a593Smuzhiyun err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
1162*4882a593Smuzhiyun if (err != -EBUSY)
1163*4882a593Smuzhiyun break;
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun percpu_extras->tx_portal_busy += i;
1166*4882a593Smuzhiyun if (unlikely(err < 0)) {
1167*4882a593Smuzhiyun percpu_stats->tx_errors++;
1168*4882a593Smuzhiyun /* Clean up everything, including freeing the skb */
1169*4882a593Smuzhiyun dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
1170*4882a593Smuzhiyun netdev_tx_completed_queue(nq, 1, fd_len);
1171*4882a593Smuzhiyun } else {
1172*4882a593Smuzhiyun percpu_stats->tx_packets++;
1173*4882a593Smuzhiyun percpu_stats->tx_bytes += fd_len;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun return NETDEV_TX_OK;
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun err_build_fd:
1179*4882a593Smuzhiyun dev_kfree_skb(skb);
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun return NETDEV_TX_OK;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
dpaa2_eth_tx_onestep_tstamp(struct work_struct * work)1184*4882a593Smuzhiyun static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1185*4882a593Smuzhiyun {
1186*4882a593Smuzhiyun struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1187*4882a593Smuzhiyun tx_onestep_tstamp);
1188*4882a593Smuzhiyun struct sk_buff *skb;
1189*4882a593Smuzhiyun
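	/* Drain the queue of pending one-step PTP Sync packets, transmitting
	 * them strictly one at a time: the mutex below is only released on
	 * Tx confirmation, so at most one such packet is ever in flight.
	 */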
	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before transmitting a one-step timestamping
		 * packet; the lock is released in dpaa2_eth_free_tx_fd once
		 * the packet is confirmed sent by hardware, or during cleanup
		 * on transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == 0 && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not a one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate one page per Rx buffer. WRIOP sees the entire
		 * page except for a tailroom reserved for skb shared info.
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
					 DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

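	/* An acquire command returns at most DPAA2_ETH_BUFS_PER_CMD buffers,
	 * so drain in full-size batches first, then one buffer at a time to
	 * catch whatever remainder the batched acquire may leave behind.
	 */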
	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

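	/* Top up the pool in batches of DPAA2_ETH_BUFS_PER_CMD (the most one
	 * release command can carry) until it is back at its target level.
	 */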
	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}

static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

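	/* Rx skbs are accumulated on rx_list and handed to the stack in a
	 * single batch via netif_receive_skb_list() at the end of the poll
	 * cycle, amortizing the per-packet cost of entering the stack.
	 */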
	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

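	/* Flush deferred XDP work: redirected frames go out through the
	 * redirect maps, while frames batched for XDP_TX during this poll
	 * are enqueued to hardware now.
	 */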
	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}

static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class.
	 * Enabled if general Tx pause is disabled or if PFCs are enabled
	 * (the congestion group threshold for PFC generation is lower than
	 * the CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || (tx_pause && pfc);
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (priv->mac)
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpaa2_eth_seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!priv->mac) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	dpaa2_eth_enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (priv->mac)
		phylink_start(priv->mac->phylink);

	return 0;

enable_err:
	dpaa2_eth_disable_ch_napi(priv);
	dpaa2_eth_drain_pool(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = dpaa2_eth_ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR 7
#define DPNI_TX_PENDING_VER_MINOR 13
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

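	/* DPNI firmware older than 7.13 cannot report the number of pending
	 * egress frames; in that case fall through to the fixed 500ms grace
	 * period below instead of polling the counter.
	 */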
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (!priv->mac) {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	} else {
		phylink_stop(priv->mac->phylink);
	}

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	dpaa2_eth_wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	dpaa2_eth_wait_for_ingress_fq_empty(priv);
	dpaa2_eth_disable_ch_napi(priv);

	/* Empty the buffer pool */
	dpaa2_eth_drain_pool(priv);

	/* Empty the Scatter-Gather Buffer cache */
	dpaa2_eth_sgt_cache_drain(priv);

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

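	/* rtnl_link_stats64 is just an array of u64 under the hood, so the
	 * per-CPU copies can be folded into the aggregate field by field.
	 */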
	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		dpaa2_eth_add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	dpaa2_eth_add_mc_hw_addr(net_dev, priv);
	dpaa2_eth_add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = dpaa2_eth_set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = dpaa2_eth_set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (!dpaa2_ptp)
		return -EINVAL;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		priv->tx_tstamp_type = config.tx_type;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	if (cmd == SIOCSHWTSTAMP)
		return dpaa2_eth_ts_ioctl(dev, rq, cmd);

	if (priv->mac)
		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);

	return -EOPNOTSUPP;
}

static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

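	/* With XDP attached, the whole frame must fit in a single Rx buffer,
	 * after subtracting the hardware annotation area, the Rx headroom
	 * and XDP's own mandatory headroom.
	 */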
	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}

static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware.
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}

static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog)
		bpf_prog_add(prog, priv->num_channels);

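	/* Only attaching the first program or detaching the last one changes
	 * the Rx frame length and buffer layout; swapping one XDP program
	 * for another does not require reconfiguration.
	 */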
2234*4882a593Smuzhiyun up = netif_running(dev);
2235*4882a593Smuzhiyun need_update = (!!priv->xdp_prog != !!prog);
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun if (up)
2238*4882a593Smuzhiyun dpaa2_eth_stop(dev);
2239*4882a593Smuzhiyun
2240*4882a593Smuzhiyun /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2241*4882a593Smuzhiyun * Also, when switching between xdp/non-xdp modes we need to reconfigure
2242*4882a593Smuzhiyun * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2243*4882a593Smuzhiyun * so we are sure no old format buffers will be used from now on.
2244*4882a593Smuzhiyun */
2245*4882a593Smuzhiyun if (need_update) {
2246*4882a593Smuzhiyun err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2247*4882a593Smuzhiyun if (err)
2248*4882a593Smuzhiyun goto out_err;
2249*4882a593Smuzhiyun err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2250*4882a593Smuzhiyun if (err)
2251*4882a593Smuzhiyun goto out_err;
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun
2254*4882a593Smuzhiyun old = xchg(&priv->xdp_prog, prog);
2255*4882a593Smuzhiyun if (old)
2256*4882a593Smuzhiyun bpf_prog_put(old);
2257*4882a593Smuzhiyun
2258*4882a593Smuzhiyun for (i = 0; i < priv->num_channels; i++) {
2259*4882a593Smuzhiyun ch = priv->channel[i];
2260*4882a593Smuzhiyun old = xchg(&ch->xdp.prog, prog);
2261*4882a593Smuzhiyun if (old)
2262*4882a593Smuzhiyun bpf_prog_put(old);
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun if (up) {
2266*4882a593Smuzhiyun err = dpaa2_eth_open(dev);
2267*4882a593Smuzhiyun if (err)
2268*4882a593Smuzhiyun return err;
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun return 0;
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyun out_err:
2274*4882a593Smuzhiyun if (prog)
2275*4882a593Smuzhiyun bpf_prog_sub(prog, priv->num_channels);
2276*4882a593Smuzhiyun if (up)
2277*4882a593Smuzhiyun dpaa2_eth_open(dev);
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun return err;
2280*4882a593Smuzhiyun }
2281*4882a593Smuzhiyun
dpaa2_eth_xdp(struct net_device * dev,struct netdev_bpf * xdp)2282*4882a593Smuzhiyun static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun switch (xdp->command) {
2285*4882a593Smuzhiyun case XDP_SETUP_PROG:
2286*4882a593Smuzhiyun return dpaa2_eth_setup_xdp(dev, xdp->prog);
2287*4882a593Smuzhiyun default:
2288*4882a593Smuzhiyun return -EINVAL;
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun return 0;
2292*4882a593Smuzhiyun }
2293*4882a593Smuzhiyun
dpaa2_eth_xdp_create_fd(struct net_device * net_dev,struct xdp_frame * xdpf,struct dpaa2_fd * fd)2294*4882a593Smuzhiyun static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2295*4882a593Smuzhiyun struct xdp_frame *xdpf,
2296*4882a593Smuzhiyun struct dpaa2_fd *fd)
2297*4882a593Smuzhiyun {
2298*4882a593Smuzhiyun struct device *dev = net_dev->dev.parent;
2299*4882a593Smuzhiyun unsigned int needed_headroom;
2300*4882a593Smuzhiyun struct dpaa2_eth_swa *swa;
2301*4882a593Smuzhiyun void *buffer_start, *aligned_start;
2302*4882a593Smuzhiyun dma_addr_t addr;
2303*4882a593Smuzhiyun
2304*4882a593Smuzhiyun /* We require a minimum headroom to be able to transmit the frame.
2305*4882a593Smuzhiyun * Otherwise return an error and let the original net_device handle it
2306*4882a593Smuzhiyun */
2307*4882a593Smuzhiyun needed_headroom = dpaa2_eth_needed_headroom(NULL);
2308*4882a593Smuzhiyun if (xdpf->headroom < needed_headroom)
2309*4882a593Smuzhiyun return -EINVAL;
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun /* Setup the FD fields */
2312*4882a593Smuzhiyun memset(fd, 0, sizeof(*fd));
2313*4882a593Smuzhiyun
2314*4882a593Smuzhiyun /* Align FD address, if possible */
2315*4882a593Smuzhiyun buffer_start = xdpf->data - needed_headroom;
2316*4882a593Smuzhiyun aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2317*4882a593Smuzhiyun DPAA2_ETH_TX_BUF_ALIGN);
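	/* PTR_ALIGN() rounds up, so subtracting DPAA2_ETH_TX_BUF_ALIGN first
	 * effectively aligns buffer_start downwards; use the aligned address
	 * only if it still falls within the frame's available headroom.
	 */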
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* Fill in the software annotation; it is read back on the Tx
	 * confirmation path to unmap the buffer and free the xdp_frame.
	 */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(fd, xdpf->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int enqueued, i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	fq = &priv->fq[smp_processor_id()];
	xdp_redirect_fds = &fq->xdp_redirect_fds;
	fds = xdp_redirect_fds->fds;
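	/* The first dpaa2_eth_queue_count() entries of priv->fq are Tx conf
	 * queues; this relies on there being (at least) one such queue per
	 * CPU, so that ndo_xdp_xmit, which runs in softirq context on the
	 * current CPU, gets lock-free access to its FD scratch array.
	 */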

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* create a FD for each xdp_frame in the list received */
	for (i = 0; i < n; i++) {
		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
		if (err)
			break;
	}
	xdp_redirect_fds->num = i;

	/* enqueue all the frame descriptors */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	for (i = 0; i < enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
	for (i = enqueued; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);

	return enqueued;
}

static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
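	/* GCC's a ?: b shorthand evaluates to a when a is non-zero, else b,
	 * so this is num_tc * num_queues when traffic classes are configured
	 * and just num_queues otherwise.
	 */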

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}

static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
				  struct tc_mqprio_qopt *mqprio)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 num_tc, num_queues;
	int i;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

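	/* Map each traffic class i to the contiguous hardware queue range
	 * [i * num_queues, (i + 1) * num_queues).
	 */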
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	update_xps(priv);

	return 0;
}

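/* Note: dividing before multiplying keeps the intermediate value within u64
 * range for any byte rate, but truncates to whole Mbyte/s first, so the
 * programmed rate ends up a multiple of 8 Mbit/s.
 */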
#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)

static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
{
	struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
	struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
	int err;

	if (p->command == TC_TBF_STATS)
		return -EOPNOTSUPP;

	/* Only per port Tx shaping */
	if (p->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE) {
		if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
			netdev_err(net_dev, "burst size cannot be greater than %d\n",
				   DPAA2_ETH_MAX_BURST_SIZE);
			return -EINVAL;
		}

		tx_cr_shaper.max_burst_size = cfg->max_size;
		/* The TBF interface is in bytes/s, whereas DPAA2 expects the
		 * rate in Mbits/s
		 */
		tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
	}

	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
				  &tx_er_shaper, 0);
	if (err) {
		netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
		return err;
	}

	return 0;
}

static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return dpaa2_eth_setup_mqprio(net_dev, type_data);
	case TC_SETUP_QDISC_TBF:
		return dpaa2_eth_setup_tbf(net_dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_do_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
	.ndo_setup_tc = dpaa2_eth_setup_tc,
};

static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule(&ch->napi);
}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}

static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
				 struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = dpaa2_eth_setup_dpcon(priv);
	if (IS_ERR(channel->dpcon)) {
		err = PTR_ERR(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *channel)
{
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = dpaa2_eth_alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = dpaa2_eth_cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
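		/* user_ctx is echoed back in the CDAN message; the DPIO
		 * service uses it to find our notification context and
		 * invoke dpaa2_eth_cdan_cb() above.
		 */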
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			dpaa2_eth_free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		dpaa2_eth_free_channel(priv, ch);
	}
}

static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
							      int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
		case DPAA2_RX_ERR_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}

static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i, j;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
			priv->fq[priv->num_fqs].tc = (u8)j;
			priv->fq[priv->num_fqs++].flowid = (u16)i;
		}
	}

	/* We have exactly one Rx error queue per DPNI */
	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;

	/* For each FQ, decide on which core to process incoming frames */
	dpaa2_eth_set_fq_affinity(priv);
}

/* Allocate and configure one buffer pool for each interface */
static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}

static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
{
	dpaa2_eth_drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	u16 rx_buf_align;
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always reported correctly on rev1:
	 * it may show up as either 0.0.0 or 1.0.0, so check for both.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* We need to ensure that the buffer size seen by WRIOP is a multiple
	 * of 64 or 256 bytes depending on the WRIOP version.
	 */
	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
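	/* ALIGN_DOWN never grows the value, so the usable Rx buffer size may
	 * end up smaller than DPAA2_ETH_RX_BUF_SIZE by up to
	 * rx_buf_align - 1 bytes.
	 */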

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.pass_frame_status = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}

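/* DPNI API versions earlier than 7.9 can only enqueue Tx frames through the
 * QDID indirection; newer versions also support enqueueing directly by FQID
 * (see dpaa2_eth_set_enqueue_mode() below).
 */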
#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
#define DPNI_ENQUEUE_FQID_VER_MINOR 9

static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio,
				       u32 num_frames __always_unused,
				       int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
					  priv->tx_qdid, prio,
					  fq->tx_qdbin, fd);
	if (!err && frames_enqueued)
		*frames_enqueued = 1;
	return err;
}

static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_fq *fq,
						struct dpaa2_fd *fd,
						u8 prio, u32 num_frames,
						int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
						   fq->tx_fqid[prio],
						   fd, num_frames);

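	/* dpaa2_io_service_enqueue_multiple_fq() returns the number of frames
	 * actually enqueued or a negative error; 0 therefore means the portal
	 * accepted nothing, which we report as -EBUSY.
	 */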
	if (err == 0)
		return -EBUSY;

	if (frames_enqueued)
		*frames_enqueued = err;
	return 0;
}

static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		priv->enqueue = dpaa2_eth_enqueue_qd;
	else
		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}

static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_link_cfg link_cfg = {0};
	int err;

	/* Get the default link options so we don't override other flags */
	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_get_link_cfg() failed\n");
		return err;
	}

	/* By default, enable both Rx and Tx pause frames */
	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = link_cfg.options;

	return 0;
}

static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{
	struct dpni_queue_id qid = {0};
	struct dpaa2_eth_fq *fq;
	struct dpni_queue queue;
	int i, j, err;

	/* We only use Tx FQIDs for FQID-based enqueue, so check
	 * if DPNI version supports it before updating FQIDs
	 */
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		return;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_TX_CONF_FQ)
			continue;
		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
					     DPNI_QUEUE_TX, j, fq->flowid,
					     &queue, &qid);
			if (err)
				goto out_err;

			fq->tx_fqid[j] = qid.fqid;
			if (fq->tx_fqid[j] == 0)
				goto out_err;
		}
	}

	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;

	return;

out_err:
	netdev_info(priv->net_dev,
		    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
	priv->enqueue = dpaa2_eth_enqueue_qd;
}

/* Configure ingress classification based on VLAN PCP */
static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpkg_profile_cfg kg_cfg = {0};
	struct dpni_qos_tbl_cfg qos_cfg = {0};
	struct dpni_rule_cfg key_params;
	void *dma_mem, *key, *mask;
	u8 key_size = 2;	/* VLAN TCI field */
	int i, pcp, err;

	/* VLAN-based classification only makes sense if we have multiple
	 * traffic classes.
	 * Also, we need to extract just the 3-bit PCP field from the VLAN
	 * header and we can only do that by using a mask
	 */
	if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
		dev_dbg(dev, "VLAN-based QoS classification not supported\n");
		return -EOPNOTSUPP;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;

	err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg failed\n");
		goto out_free_tbl;
	}

	/* set QoS table */
	qos_cfg.default_tc = 0;
	qos_cfg.discard_on_miss = 0;
	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
					      DPAA2_CLASSIFIER_DMA_SIZE,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
		dev_err(dev, "QoS table DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_tbl;
	}

	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
	if (err) {
		dev_err(dev, "dpni_set_qos_table failed\n");
		goto out_unmap_tbl;
	}

	/* Add QoS table entries */
	key = kzalloc(key_size * 2, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto out_unmap_tbl;
	}
	mask = key + key_size;
	*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
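	/* The key and its mask live back to back in the same allocation, so
	 * a single DMA mapping below covers both; the mask keeps only the
	 * 3 PCP bits of the TCI.
	 */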

	key_params.key_iova = dma_map_single(dev, key, key_size * 2,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_params.key_iova)) {
		dev_err(dev, "QoS table entry DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_key;
	}

	key_params.mask_iova = key_params.key_iova + key_size;
	key_params.key_size = key_size;

	/* We add rules for PCP-based distribution starting with highest
	 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
	 * classes to accommodate all priority levels, the lowest ones end up
	 * on TC 0 which was configured as default
	 */
	for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
		*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
		dma_sync_single_for_device(dev, key_params.key_iova,
					   key_size * 2, DMA_TO_DEVICE);

		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
					 &key_params, i, i);
		if (err) {
			dev_err(dev, "dpni_add_qos_entry failed\n");
			dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
			goto out_unmap_key;
		}
	}

	priv->vlan_cls_enabled = true;

	/* Table and key memory is not persistent, clean everything up after
	 * configuration is finished
	 */
out_unmap_key:
	dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
out_free_key:
	kfree(key);
out_unmap_tbl:
	dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
out_free_tbl:
	kfree(dma_mem);

	return err;
}

/* Configure the DPNI object this interface is associated with */
static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		return err;
	}

	/* Check if we can work with this DPNI object */
	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
				   &priv->dpni_ver_minor);
	if (err) {
		dev_err(dev, "dpni_get_api_version() failed\n");
		goto close;
	}
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
			priv->dpni_ver_major, priv->dpni_ver_minor,
			DPNI_VER_MAJOR, DPNI_VER_MINOR);
		err = -ENOTSUPP;
		goto close;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto close;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto close;
	}

	err = dpaa2_eth_set_buffer_layout(priv);
	if (err)
		goto close;

	dpaa2_eth_set_enqueue_mode(priv);

	/* Enable pause frame support */
	if (dpaa2_eth_has_pause_support(priv)) {
		err = dpaa2_eth_set_pause(priv);
		if (err)
			goto close;
	}

	err = dpaa2_eth_set_vlan_qos(priv);
	if (err && err != -EOPNOTSUPP)
		goto close;

	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
				       sizeof(struct dpaa2_eth_cls_rule),
				       GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}

	return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}

static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}

static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	/* xdp_rxq setup: Rx queues of all traffic classes share the same
	 * channel, so register the xdp_rxq info only once per channel,
	 * i.e. for the TC 0 queue.
	 */
	if (fq->tc > 0)
		return 0;

	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
			       fq->flowid);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg failed\n");
		return err;
	}

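	/* Rx buffers are plain order-0 pages, so register the PAGE_ORDER0
	 * memory model; this tells the XDP core how to free or recycle the
	 * buffer on XDP_DROP and on redirect error paths.
	 */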
	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int i, err;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, i, fq->flowid,
				     &queue, &qid);
		if (err) {
			dev_err(dev, "dpni_get_queue(TX) failed\n");
			return err;
		}
		fq->tx_fqid[i] = qid.fqid;
	}

	/* All Tx queues belonging to the same flowid have the same qdbin */
	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue q = { { 0 } };
	struct dpni_queue_id qid;
	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
		return err;
	}

	fq->fqid = qid.fqid;

	q.destination.id = fq->channel->dpcon_id;
	q.destination.type = DPNI_DEST_DPCON;
	q.destination.priority = 1;
	q.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
	if (err) {
		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
		return err;
	}

	return 0;
}
3442*4882a593Smuzhiyun
3443*4882a593Smuzhiyun /* Supported header fields for Rx hash distribution key */
3444*4882a593Smuzhiyun static const struct dpaa2_eth_dist_fields dist_fields[] = {
3445*4882a593Smuzhiyun {
3446*4882a593Smuzhiyun /* L2 header */
3447*4882a593Smuzhiyun .rxnfc_field = RXH_L2DA,
3448*4882a593Smuzhiyun .cls_prot = NET_PROT_ETH,
3449*4882a593Smuzhiyun .cls_field = NH_FLD_ETH_DA,
3450*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_ETHDST,
3451*4882a593Smuzhiyun .size = 6,
3452*4882a593Smuzhiyun }, {
3453*4882a593Smuzhiyun .cls_prot = NET_PROT_ETH,
3454*4882a593Smuzhiyun .cls_field = NH_FLD_ETH_SA,
3455*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_ETHSRC,
3456*4882a593Smuzhiyun .size = 6,
3457*4882a593Smuzhiyun }, {
3458*4882a593Smuzhiyun /* This is the last ethertype field parsed:
3459*4882a593Smuzhiyun * depending on frame format, it can be the MAC ethertype
3460*4882a593Smuzhiyun * or the VLAN etype.
3461*4882a593Smuzhiyun */
3462*4882a593Smuzhiyun .cls_prot = NET_PROT_ETH,
3463*4882a593Smuzhiyun .cls_field = NH_FLD_ETH_TYPE,
3464*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_ETHTYPE,
3465*4882a593Smuzhiyun .size = 2,
3466*4882a593Smuzhiyun }, {
3467*4882a593Smuzhiyun /* VLAN header */
3468*4882a593Smuzhiyun .rxnfc_field = RXH_VLAN,
3469*4882a593Smuzhiyun .cls_prot = NET_PROT_VLAN,
3470*4882a593Smuzhiyun .cls_field = NH_FLD_VLAN_TCI,
3471*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_VLAN,
3472*4882a593Smuzhiyun .size = 2,
3473*4882a593Smuzhiyun }, {
3474*4882a593Smuzhiyun /* IP header */
3475*4882a593Smuzhiyun .rxnfc_field = RXH_IP_SRC,
3476*4882a593Smuzhiyun .cls_prot = NET_PROT_IP,
3477*4882a593Smuzhiyun .cls_field = NH_FLD_IP_SRC,
3478*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_IPSRC,
3479*4882a593Smuzhiyun .size = 4,
3480*4882a593Smuzhiyun }, {
3481*4882a593Smuzhiyun .rxnfc_field = RXH_IP_DST,
3482*4882a593Smuzhiyun .cls_prot = NET_PROT_IP,
3483*4882a593Smuzhiyun .cls_field = NH_FLD_IP_DST,
3484*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_IPDST,
3485*4882a593Smuzhiyun .size = 4,
3486*4882a593Smuzhiyun }, {
3487*4882a593Smuzhiyun .rxnfc_field = RXH_L3_PROTO,
3488*4882a593Smuzhiyun .cls_prot = NET_PROT_IP,
3489*4882a593Smuzhiyun .cls_field = NH_FLD_IP_PROTO,
3490*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_IPPROTO,
3491*4882a593Smuzhiyun .size = 1,
3492*4882a593Smuzhiyun }, {
3493*4882a593Smuzhiyun /* Using UDP ports, this is functionally equivalent to raw
3494*4882a593Smuzhiyun * byte pairs from L4 header.
3495*4882a593Smuzhiyun */
3496*4882a593Smuzhiyun .rxnfc_field = RXH_L4_B_0_1,
3497*4882a593Smuzhiyun .cls_prot = NET_PROT_UDP,
3498*4882a593Smuzhiyun .cls_field = NH_FLD_UDP_PORT_SRC,
3499*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_L4SRC,
3500*4882a593Smuzhiyun .size = 2,
3501*4882a593Smuzhiyun }, {
3502*4882a593Smuzhiyun .rxnfc_field = RXH_L4_B_2_3,
3503*4882a593Smuzhiyun .cls_prot = NET_PROT_UDP,
3504*4882a593Smuzhiyun .cls_field = NH_FLD_UDP_PORT_DST,
3505*4882a593Smuzhiyun .id = DPAA2_ETH_DIST_L4DST,
3506*4882a593Smuzhiyun .size = 2,
3507*4882a593Smuzhiyun },
3508*4882a593Smuzhiyun };
3509*4882a593Smuzhiyun
3510*4882a593Smuzhiyun /* Configure the Rx hash key using the legacy API */
dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv * priv,dma_addr_t key)3511*4882a593Smuzhiyun static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3512*4882a593Smuzhiyun {
3513*4882a593Smuzhiyun struct device *dev = priv->net_dev->dev.parent;
3514*4882a593Smuzhiyun struct dpni_rx_tc_dist_cfg dist_cfg;
3515*4882a593Smuzhiyun int i, err = 0;
3516*4882a593Smuzhiyun
3517*4882a593Smuzhiyun memset(&dist_cfg, 0, sizeof(dist_cfg));
3518*4882a593Smuzhiyun
3519*4882a593Smuzhiyun dist_cfg.key_cfg_iova = key;
3520*4882a593Smuzhiyun dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3521*4882a593Smuzhiyun dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3522*4882a593Smuzhiyun
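	/* The legacy API only takes per-traffic-class commands, so program
	 * the same key on every traffic class.
	 */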
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
					  i, &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
			break;
		}
	}

	return err;
}

/* Configure the Rx hash key using the new API */
static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
					    &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}

/* Configure the Rx flow classification key */
static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
					  &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

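	/* Walk the full key layout and copy each selected field down over
	 * the gaps left by unselected ones, compacting the rule in place.
	 */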
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

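	/* Serialize the key profile into a DMA-able buffer; the MC firmware
	 * reads it by IOVA rather than by virtual address.
	 */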
	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
		else
			err = dpaa2_eth_config_hash_key(priv, key_iova);
	} else {
		err = dpaa2_eth_config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_RX_ERR_FQ:
			err = setup_rx_err_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

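	/* Retrieve the queuing destination ID to be used when enqueuing
	 * egress frames
	 */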
	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = dpaa2_eth_set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX | NETIF_F_HW_TC;
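	/* Mirror into hw_features so the offloads above can be toggled
	 * at runtime through ethtool.
	 */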
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int dpaa2_eth_poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = dpaa2_eth_link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

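	/* A fixed-type DPMAC needs no link management from the driver,
	 * so skip the MAC connect in that case.
	 */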
	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

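	/* The DPNI's endpoint changed: refresh the MAC address and Tx
	 * FQIDs, then tear down or set up the DPMAC link depending on
	 * whether one was previously connected.
	 */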
	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		rtnl_lock();
		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
	priv->rx_tstamp = false;

	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
	mutex_init(&priv->onestep_tstamp_lock);
	skb_queue_head_init(&priv->tx_skbs);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
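		/* -ENXIO means no free MC portal is available yet, so
		 * defer probing and retry once one is released.
		 */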
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_dl_register(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_dl_port_del(priv);
err_dl_port_add:
	dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
	dpaa2_eth_dl_unregister(priv);
err_dl_register:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	dpaa2_eth_del_ch_napi(priv);
err_bind:
	dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
	dpaa2_eth_free_dpio(priv);
err_dpio_setup:
	dpaa2_eth_free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	destroy_workqueue(priv->dpaa2_ptp_wq);
err_wq_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif

	unregister_netdev(net_dev);
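	/* Take rtnl to serialize MAC teardown against the endpoint-change
	 * IRQ handler, which connects/disconnects the MAC under the same
	 * lock.
	 */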
	rtnl_lock();
	dpaa2_eth_disconnect_mac(priv);
	rtnl_unlock();

	dpaa2_eth_dl_port_del(priv);
	dpaa2_eth_dl_traps_unregister(priv);
	dpaa2_eth_dl_unregister(priv);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	dpaa2_eth_free_rings(priv);
	free_percpu(priv->sgt_cache);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	dpaa2_eth_del_ch_napi(priv);
	dpaa2_eth_free_dpbp(priv);
	dpaa2_eth_free_dpio(priv);
	dpaa2_eth_free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	destroy_workqueue(priv->dpaa2_ptp_wq);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);