/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE	512
#define OCTEON_MGMT_TX_RING_SIZE	128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

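/* MIX (management packet interface) CSR offsets, relative to the per-port
 * base address kept in octeon_mgmt->mix.
 */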
#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
#define MIX_BIST	0x78

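/* AGL GMX CSR offsets, relative to the per-port base address kept in
 * octeon_mgmt->agl.
 */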
#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8

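/* Per-port driver state.  One instance is allocated as netdev_priv() for
 * each management port.
 */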
struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

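/* Ring geometry helpers.  The rings are deliberately never filled
 * completely; ring_max_fill() leaves 8 entries of slack.
 */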
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		/* Make sure there is no reorder of filling the ring and ringing
		 * the bell
		 */
		wmb();

		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

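/* Reclaim transmit descriptors the hardware has finished with.  For each
 * completed entry: unmap the buffer, deliver a TX timestamp if one was
 * requested, free the skb and acknowledge the entry via MIX_ORCNT.
 */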
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
{
	struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);

	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

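/* Pack one MAC address into the CAM state: byte i of the address is placed
 * in byte 'cam_index' of CAM register i, and the matching enable bit is set
 * in cam_mask.
 */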
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;	/* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	netdev->mtu = new_mtu;

	/* HW lifts the limit if the frame is VLAN tagged
	 * (+4 bytes per each tag, up to two tags)
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
	/* Set the hardware to truncate packets larger than the MTU. The jabber
	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
	 * an unconditional limit, so we need to account for two possible VLAN
	 * tags.
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			netdev_info(netdev,
				    "PTP Clock using sclk reference @ %lldHz\n",
				    (NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			netdev_info(netdev,
				    "PTP Clock using GPIO%d @ %lld Hz\n",
				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		return phy_do_ioctl(netdev, rq, cmd);
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
			i++;
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

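/* phylib adjust_link callback: when the PHY reports a link, speed or duplex
 * change, briefly disable the port, reprogram the GMX configuration,
 * re-enable the port and log the new link state.
 */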
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	unsigned long flags;
	int link_changed = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!phydev->link && p->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0)
			netdev_info(netdev, "Link is up - %d/%s\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
		else
			netdev_info(netdev, "Link is down\n");
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	phydev = of_phy_connect(netdev, p->phy_np,
				octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (!phydev)
		return -EPROBE_DEFER;

	return 0;
}

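/* ndo_open: allocate and map the TX/RX descriptor rings, bring the MIX and
 * AGL blocks out of reset, connect the PHY, program the port, RX filter and
 * interrupt configuration, then start the queue and NAPI.
 */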
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode =
			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
					   netdev->phydev->supported) |
			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
					   netdev->phydev->supported)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125Mhz
		 * reference, which has an 8nS period.  So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  The recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

1143*4882a593Smuzhiyun octeon_mgmt_rx_fill_ring(netdev);
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun /* Clear statistics. */
1146*4882a593Smuzhiyun /* Clear on read. */
1147*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1148*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1149*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1152*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1153*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun /* Clear any pending interrupts */
1156*4882a593Smuzhiyun cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1159*4882a593Smuzhiyun netdev)) {
1160*4882a593Smuzhiyun dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1161*4882a593Smuzhiyun goto err_noirq;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun /* Interrupt every single RX packet */
1165*4882a593Smuzhiyun mix_irhwm.u64 = 0;
1166*4882a593Smuzhiyun mix_irhwm.s.irhwm = 0;
1167*4882a593Smuzhiyun cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun /* Interrupt when we have 1 or more packets to clean. */
1170*4882a593Smuzhiyun mix_orhwm.u64 = 0;
1171*4882a593Smuzhiyun mix_orhwm.s.orhwm = 0;
1172*4882a593Smuzhiyun cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun /* Enable receive and transmit interrupts */
1175*4882a593Smuzhiyun mix_intena.u64 = 0;
1176*4882a593Smuzhiyun mix_intena.s.ithena = 1;
1177*4882a593Smuzhiyun mix_intena.s.othena = 1;
1178*4882a593Smuzhiyun cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun /* Enable packet I/O. */
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun rxx_frm_ctl.u64 = 0;
1183*4882a593Smuzhiyun rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1184*4882a593Smuzhiyun rxx_frm_ctl.s.pre_align = 1;
1185*4882a593Smuzhiyun /* When set, disables the length check for non-min sized pkts
1186*4882a593Smuzhiyun * with padding in the client data.
1187*4882a593Smuzhiyun */
1188*4882a593Smuzhiyun rxx_frm_ctl.s.pad_len = 1;
1189*4882a593Smuzhiyun /* When set, disables the length check for VLAN pkts */
1190*4882a593Smuzhiyun rxx_frm_ctl.s.vlan_len = 1;
1191*4882a593Smuzhiyun /* When set, PREAMBLE checking is less strict */
1192*4882a593Smuzhiyun rxx_frm_ctl.s.pre_free = 1;
1193*4882a593Smuzhiyun /* Control Pause Frames can match station SMAC */
1194*4882a593Smuzhiyun rxx_frm_ctl.s.ctl_smac = 0;
1195*4882a593Smuzhiyun /* Control Pause Frames can match globally assigned Multicast address */
1196*4882a593Smuzhiyun rxx_frm_ctl.s.ctl_mcst = 1;
1197*4882a593Smuzhiyun /* Forward pause information to TX block */
1198*4882a593Smuzhiyun rxx_frm_ctl.s.ctl_bck = 1;
1199*4882a593Smuzhiyun /* Drop Control Pause Frames */
1200*4882a593Smuzhiyun rxx_frm_ctl.s.ctl_drp = 1;
1201*4882a593Smuzhiyun /* Strip off the preamble */
1202*4882a593Smuzhiyun rxx_frm_ctl.s.pre_strp = 1;
1203*4882a593Smuzhiyun /* This port is configured to send PREAMBLE+SFD to begin every
1204*4882a593Smuzhiyun * frame. GMX checks that the PREAMBLE is sent correctly.
1205*4882a593Smuzhiyun */
1206*4882a593Smuzhiyun rxx_frm_ctl.s.pre_chk = 1;
1207*4882a593Smuzhiyun cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun /* Configure the port duplex, speed and enables */
1210*4882a593Smuzhiyun octeon_mgmt_disable_link(p);
1211*4882a593Smuzhiyun if (netdev->phydev)
1212*4882a593Smuzhiyun octeon_mgmt_update_link(p);
1213*4882a593Smuzhiyun octeon_mgmt_enable_link(p);
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun p->last_link = 0;
1216*4882a593Smuzhiyun p->last_speed = 0;
1217*4882a593Smuzhiyun /* The PHY is not present in the simulator. There the carrier
1218*4882a593Smuzhiyun * was enabled while initializing the PHY, so leave it enabled.
1219*4882a593Smuzhiyun */
1220*4882a593Smuzhiyun if (netdev->phydev) {
1221*4882a593Smuzhiyun netif_carrier_off(netdev);
1222*4882a593Smuzhiyun phy_start(netdev->phydev);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun netif_wake_queue(netdev);
1226*4882a593Smuzhiyun napi_enable(&p->napi);
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun return 0;
1229*4882a593Smuzhiyun err_noirq:
1230*4882a593Smuzhiyun octeon_mgmt_reset_hw(p);
1231*4882a593Smuzhiyun dma_unmap_single(p->dev, p->rx_ring_handle,
1232*4882a593Smuzhiyun ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1233*4882a593Smuzhiyun DMA_BIDIRECTIONAL);
1234*4882a593Smuzhiyun kfree(p->rx_ring);
1235*4882a593Smuzhiyun err_nomem:
1236*4882a593Smuzhiyun dma_unmap_single(p->dev, p->tx_ring_handle,
1237*4882a593Smuzhiyun ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1238*4882a593Smuzhiyun DMA_BIDIRECTIONAL);
1239*4882a593Smuzhiyun kfree(p->tx_ring);
1240*4882a593Smuzhiyun return -ENOMEM;
1241*4882a593Smuzhiyun }
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun static int octeon_mgmt_stop(struct net_device *netdev)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun struct octeon_mgmt *p = netdev_priv(netdev);
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun napi_disable(&p->napi);
1248*4882a593Smuzhiyun netif_stop_queue(netdev);
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun if (netdev->phydev) {
1251*4882a593Smuzhiyun phy_stop(netdev->phydev);
1252*4882a593Smuzhiyun phy_disconnect(netdev->phydev);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun netif_carrier_off(netdev);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun octeon_mgmt_reset_hw(p);
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun free_irq(p->irq, netdev);
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun /* dma_unmap is a nop on Octeon, so just free everything. */
1262*4882a593Smuzhiyun skb_queue_purge(&p->tx_list);
1263*4882a593Smuzhiyun skb_queue_purge(&p->rx_list);
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun dma_unmap_single(p->dev, p->rx_ring_handle,
1266*4882a593Smuzhiyun ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1267*4882a593Smuzhiyun DMA_BIDIRECTIONAL);
1268*4882a593Smuzhiyun kfree(p->rx_ring);
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun dma_unmap_single(p->dev, p->tx_ring_handle,
1271*4882a593Smuzhiyun ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1272*4882a593Smuzhiyun DMA_BIDIRECTIONAL);
1273*4882a593Smuzhiyun kfree(p->tx_ring);
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun return 0;
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun static netdev_tx_t
1279*4882a593Smuzhiyun octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1280*4882a593Smuzhiyun {
1281*4882a593Smuzhiyun struct octeon_mgmt *p = netdev_priv(netdev);
1282*4882a593Smuzhiyun union mgmt_port_ring_entry re;
1283*4882a593Smuzhiyun unsigned long flags;
1284*4882a593Smuzhiyun netdev_tx_t rv = NETDEV_TX_BUSY;
1285*4882a593Smuzhiyun
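	/* Build the TX descriptor: request a hardware timestamp if the
	 * stack asked for one, record the frame length and DMA-map the
	 * packet data.
	 */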
1286*4882a593Smuzhiyun re.d64 = 0;
1287*4882a593Smuzhiyun re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1288*4882a593Smuzhiyun re.s.len = skb->len;
1289*4882a593Smuzhiyun re.s.addr = dma_map_single(p->dev, skb->data,
1290*4882a593Smuzhiyun skb->len,
1291*4882a593Smuzhiyun DMA_TO_DEVICE);
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun spin_lock_irqsave(&p->tx_list.lock, flags);
1294*4882a593Smuzhiyun
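	/* If the ring is almost full, stop the queue; if it is completely
	 * full, unmap the buffer and return NETDEV_TX_BUSY.
	 */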
1295*4882a593Smuzhiyun if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
1296*4882a593Smuzhiyun spin_unlock_irqrestore(&p->tx_list.lock, flags);
1297*4882a593Smuzhiyun netif_stop_queue(netdev);
1298*4882a593Smuzhiyun spin_lock_irqsave(&p->tx_list.lock, flags);
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun if (unlikely(p->tx_current_fill >=
1302*4882a593Smuzhiyun ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1303*4882a593Smuzhiyun spin_unlock_irqrestore(&p->tx_list.lock, flags);
1304*4882a593Smuzhiyun dma_unmap_single(p->dev, re.s.addr, re.s.len,
1305*4882a593Smuzhiyun DMA_TO_DEVICE);
1306*4882a593Smuzhiyun goto out;
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun __skb_queue_tail(&p->tx_list, skb);
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun /* Put it in the ring. */
1312*4882a593Smuzhiyun p->tx_ring[p->tx_next] = re.d64;
1313*4882a593Smuzhiyun p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1314*4882a593Smuzhiyun p->tx_current_fill++;
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun spin_unlock_irqrestore(&p->tx_list.lock, flags);
1317*4882a593Smuzhiyun
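	/* Flush the updated ring entry to memory so the hardware sees it
	 * before the doorbell write below.
	 */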
1318*4882a593Smuzhiyun dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1319*4882a593Smuzhiyun ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1320*4882a593Smuzhiyun DMA_BIDIRECTIONAL);
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun netdev->stats.tx_packets++;
1323*4882a593Smuzhiyun netdev->stats.tx_bytes += skb->len;
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun /* Ring the bell. */
1326*4882a593Smuzhiyun cvmx_write_csr(p->mix + MIX_ORING2, 1);
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun netif_trans_update(netdev);
1329*4882a593Smuzhiyun rv = NETDEV_TX_OK;
1330*4882a593Smuzhiyun out:
1331*4882a593Smuzhiyun octeon_mgmt_update_tx_stats(netdev);
1332*4882a593Smuzhiyun return rv;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1336*4882a593Smuzhiyun static void octeon_mgmt_poll_controller(struct net_device *netdev)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun struct octeon_mgmt *p = netdev_priv(netdev);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun octeon_mgmt_receive_packets(p, 16);
1341*4882a593Smuzhiyun octeon_mgmt_update_rx_stats(netdev);
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun #endif
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1346*4882a593Smuzhiyun struct ethtool_drvinfo *info)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1349*4882a593Smuzhiyun }
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun static int octeon_mgmt_nway_reset(struct net_device *dev)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun if (!capable(CAP_NET_ADMIN))
1354*4882a593Smuzhiyun return -EPERM;
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun if (dev->phydev)
1357*4882a593Smuzhiyun return phy_start_aneg(dev->phydev);
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun return -EOPNOTSUPP;
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1363*4882a593Smuzhiyun .get_drvinfo = octeon_mgmt_get_drvinfo,
1364*4882a593Smuzhiyun .nway_reset = octeon_mgmt_nway_reset,
1365*4882a593Smuzhiyun .get_link = ethtool_op_get_link,
1366*4882a593Smuzhiyun .get_link_ksettings = phy_ethtool_get_link_ksettings,
1367*4882a593Smuzhiyun .set_link_ksettings = phy_ethtool_set_link_ksettings,
1368*4882a593Smuzhiyun };
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun static const struct net_device_ops octeon_mgmt_ops = {
1371*4882a593Smuzhiyun .ndo_open = octeon_mgmt_open,
1372*4882a593Smuzhiyun .ndo_stop = octeon_mgmt_stop,
1373*4882a593Smuzhiyun .ndo_start_xmit = octeon_mgmt_xmit,
1374*4882a593Smuzhiyun .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1375*4882a593Smuzhiyun .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1376*4882a593Smuzhiyun .ndo_do_ioctl = octeon_mgmt_ioctl,
1377*4882a593Smuzhiyun .ndo_change_mtu = octeon_mgmt_change_mtu,
1378*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1379*4882a593Smuzhiyun .ndo_poll_controller = octeon_mgmt_poll_controller,
1380*4882a593Smuzhiyun #endif
1381*4882a593Smuzhiyun };
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun static int octeon_mgmt_probe(struct platform_device *pdev)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun struct net_device *netdev;
1386*4882a593Smuzhiyun struct octeon_mgmt *p;
1387*4882a593Smuzhiyun const __be32 *data;
1388*4882a593Smuzhiyun const u8 *mac;
1389*4882a593Smuzhiyun struct resource *res_mix;
1390*4882a593Smuzhiyun struct resource *res_agl;
1391*4882a593Smuzhiyun struct resource *res_agl_prt_ctl;
1392*4882a593Smuzhiyun int len;
1393*4882a593Smuzhiyun int result;
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1396*4882a593Smuzhiyun if (netdev == NULL)
1397*4882a593Smuzhiyun return -ENOMEM;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun SET_NETDEV_DEV(netdev, &pdev->dev);
1400*4882a593Smuzhiyun
1401*4882a593Smuzhiyun platform_set_drvdata(pdev, netdev);
1402*4882a593Smuzhiyun p = netdev_priv(netdev);
1403*4882a593Smuzhiyun netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1404*4882a593Smuzhiyun OCTEON_MGMT_NAPI_WEIGHT);
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun p->netdev = netdev;
1407*4882a593Smuzhiyun p->dev = &pdev->dev;
1408*4882a593Smuzhiyun p->has_rx_tstamp = false;
1409*4882a593Smuzhiyun
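	/* The "cell-index" DT property selects which MIX/AGL management
	 * port instance this device uses.
	 */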
1410*4882a593Smuzhiyun data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1411*4882a593Smuzhiyun if (data && len == sizeof(*data)) {
1412*4882a593Smuzhiyun p->port = be32_to_cpup(data);
1413*4882a593Smuzhiyun } else {
1414*4882a593Smuzhiyun dev_err(&pdev->dev, "no 'cell-index' property\n");
1415*4882a593Smuzhiyun result = -ENXIO;
1416*4882a593Smuzhiyun goto err;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun result = platform_get_irq(pdev, 0);
1422*4882a593Smuzhiyun if (result < 0)
1423*4882a593Smuzhiyun goto err;
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun p->irq = result;
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1428*4882a593Smuzhiyun if (res_mix == NULL) {
1429*4882a593Smuzhiyun dev_err(&pdev->dev, "no 'reg' resource\n");
1430*4882a593Smuzhiyun result = -ENXIO;
1431*4882a593Smuzhiyun goto err;
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1435*4882a593Smuzhiyun if (res_agl == NULL) {
1436*4882a593Smuzhiyun dev_err(&pdev->dev, "no 'reg' resource\n");
1437*4882a593Smuzhiyun result = -ENXIO;
1438*4882a593Smuzhiyun goto err;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1442*4882a593Smuzhiyun if (res_agl_prt_ctl == NULL) {
1443*4882a593Smuzhiyun dev_err(&pdev->dev, "no 'reg' resource\n");
1444*4882a593Smuzhiyun result = -ENXIO;
1445*4882a593Smuzhiyun goto err;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun p->mix_phys = res_mix->start;
1449*4882a593Smuzhiyun p->mix_size = resource_size(res_mix);
1450*4882a593Smuzhiyun p->agl_phys = res_agl->start;
1451*4882a593Smuzhiyun p->agl_size = resource_size(res_agl);
1452*4882a593Smuzhiyun p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1453*4882a593Smuzhiyun p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1457*4882a593Smuzhiyun res_mix->name)) {
1458*4882a593Smuzhiyun dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1459*4882a593Smuzhiyun res_mix->name);
1460*4882a593Smuzhiyun result = -ENXIO;
1461*4882a593Smuzhiyun goto err;
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1465*4882a593Smuzhiyun res_agl->name)) {
1466*4882a593Smuzhiyun result = -ENXIO;
1467*4882a593Smuzhiyun dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1468*4882a593Smuzhiyun res_agl->name);
1469*4882a593Smuzhiyun goto err;
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1473*4882a593Smuzhiyun p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1474*4882a593Smuzhiyun result = -ENXIO;
1475*4882a593Smuzhiyun dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1476*4882a593Smuzhiyun res_agl_prt_ctl->name);
1477*4882a593Smuzhiyun goto err;
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun
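	/* Keep the mapped addresses as u64 so they can be passed directly
	 * to the cvmx_read_csr()/cvmx_write_csr() helpers.
	 */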
1480*4882a593Smuzhiyun p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1481*4882a593Smuzhiyun p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1482*4882a593Smuzhiyun p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1483*4882a593Smuzhiyun p->agl_prt_ctl_size);
1484*4882a593Smuzhiyun if (!p->mix || !p->agl || !p->agl_prt_ctl) {
1485*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to map I/O memory\n");
1486*4882a593Smuzhiyun result = -ENOMEM;
1487*4882a593Smuzhiyun goto err;
1488*4882a593Smuzhiyun }
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun spin_lock_init(&p->lock);
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun skb_queue_head_init(&p->tx_list);
1493*4882a593Smuzhiyun skb_queue_head_init(&p->rx_list);
1494*4882a593Smuzhiyun tasklet_setup(&p->tx_clean_tasklet,
1495*4882a593Smuzhiyun octeon_mgmt_clean_tx_tasklet);
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun netdev->priv_flags |= IFF_UNICAST_FLT;
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun netdev->netdev_ops = &octeon_mgmt_ops;
1500*4882a593Smuzhiyun netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1501*4882a593Smuzhiyun
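	/* The hardware limits apply to the whole frame, so subtract the
	 * Ethernet header, FCS and VLAN headroom to get the MTU bounds.
	 */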
1502*4882a593Smuzhiyun netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1503*4882a593Smuzhiyun netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun mac = of_get_mac_address(pdev->dev.of_node);
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun if (!IS_ERR(mac))
1508*4882a593Smuzhiyun ether_addr_copy(netdev->dev_addr, mac);
1509*4882a593Smuzhiyun else
1510*4882a593Smuzhiyun eth_hw_addr_random(netdev);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1515*4882a593Smuzhiyun if (result)
1516*4882a593Smuzhiyun goto err;
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun netif_carrier_off(netdev);
1519*4882a593Smuzhiyun result = register_netdev(netdev);
1520*4882a593Smuzhiyun if (result)
1521*4882a593Smuzhiyun goto err;
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun return 0;
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun err:
1526*4882a593Smuzhiyun of_node_put(p->phy_np);
1527*4882a593Smuzhiyun free_netdev(netdev);
1528*4882a593Smuzhiyun return result;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun static int octeon_mgmt_remove(struct platform_device *pdev)
1532*4882a593Smuzhiyun {
1533*4882a593Smuzhiyun struct net_device *netdev = platform_get_drvdata(pdev);
1534*4882a593Smuzhiyun struct octeon_mgmt *p = netdev_priv(netdev);
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun unregister_netdev(netdev);
1537*4882a593Smuzhiyun of_node_put(p->phy_np);
1538*4882a593Smuzhiyun free_netdev(netdev);
1539*4882a593Smuzhiyun return 0;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun static const struct of_device_id octeon_mgmt_match[] = {
1543*4882a593Smuzhiyun {
1544*4882a593Smuzhiyun .compatible = "cavium,octeon-5750-mix",
1545*4882a593Smuzhiyun },
1546*4882a593Smuzhiyun {},
1547*4882a593Smuzhiyun };
1548*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun static struct platform_driver octeon_mgmt_driver = {
1551*4882a593Smuzhiyun .driver = {
1552*4882a593Smuzhiyun .name = "octeon_mgmt",
1553*4882a593Smuzhiyun .of_match_table = octeon_mgmt_match,
1554*4882a593Smuzhiyun },
1555*4882a593Smuzhiyun .probe = octeon_mgmt_probe,
1556*4882a593Smuzhiyun .remove = octeon_mgmt_remove,
1557*4882a593Smuzhiyun };
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun static int __init octeon_mgmt_mod_init(void)
1560*4882a593Smuzhiyun {
1561*4882a593Smuzhiyun return platform_driver_register(&octeon_mgmt_driver);
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun static void __exit octeon_mgmt_mod_exit(void)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun platform_driver_unregister(&octeon_mgmt_driver);
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun module_init(octeon_mgmt_mod_init);
1570*4882a593Smuzhiyun module_exit(octeon_mgmt_mod_exit);
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun MODULE_SOFTDEP("pre: mdio-cavium");
1573*4882a593Smuzhiyun MODULE_DESCRIPTION(DRV_DESCRIPTION);
1574*4882a593Smuzhiyun MODULE_AUTHOR("David Daney");
1575*4882a593Smuzhiyun MODULE_LICENSE("GPL");