// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder_bgx"
#define DRV_VERSION	"1.0"

/* RX_DMAC_CTL configuration */
enum MCAST_MODE {
	MCAST_MODE_REJECT = 0x0,
	MCAST_MODE_ACCEPT = 0x1,
	MCAST_MODE_CAM_FILTER = 0x2,
	RSVD = 0x3
};

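/* CMRX_RX_DMAC_CTL bit layout (from the definitions below): bit 0 accepts
 * broadcast, bits <2:1> select the multicast mode and bit 3 enables the
 * DMAC CAM filter.
 */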
#define BCAST_ACCEPT		BIT(0)
#define CAM_ACCEPT		BIT(3)
#define MCAST_MODE_MASK		0x3
#define BGX_MCAST_MODE(x)	((x) << 1)

struct dmac_map {
	u64 vf_map;
	u64 dmac;
};

struct lmac {
	struct bgx *bgx;
	/* actual number of DMACs configured */
	u8 dmacs_cfg;
	/* overall number of DMACs that can be configured per LMAC */
	u8 dmacs_count;
	struct dmac_map *dmacs; /* DMAC:VFs tracking filter array */
	u8 mac[ETH_ALEN];
	u8 lmac_type;
	u8 lane_to_sds;
	bool use_training;
	bool autoneg;
	bool link_up;
	int lmacid; /* ID within BGX */
	int lmacid_bd; /* ID on board */
	struct net_device netdev;
	struct phy_device *phydev;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	bool is_sgmii;
	struct delayed_work dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8 bgx_id;
	struct lmac lmac[MAX_LMAC_PER_BGX];
	u8 lmac_count;
	u8 max_lmac;
	u8 acpi_lmac_idx;
	void __iomem *reg_base;
	struct pci_dev *pdev;
	bool is_dlm;
	bool is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, } /* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
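/* Each LMAC owns a 1MB window within the BGX BAR, hence the (lmac << 20)
 * component of the register offset computed below.
 */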
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

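/* Poll 'reg' until the bits in 'mask' clear (zero == true) or assert
 * (zero == false); returns 0 on success and 1 after roughly 100-200ms
 * of polling (100 iterations of usleep_range(1000, 2000)).
 */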
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

static int max_bgx_per_node;
static void set_max_bgx_per_node(struct pci_dev *pdev)
{
	u16 sdevid;

	if (max_bgx_per_node)
		return;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	switch (sdevid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
	case PCI_SUBSYS_DEVID_81XX_RGX:
		max_bgx_per_node = MAX_BGX_PER_CN81XX;
		break;
	case PCI_SUBSYS_DEVID_83XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN83XX;
		break;
	case PCI_SUBSYS_DEVID_88XX_BGX:
	default:
		max_bgx_per_node = MAX_BGX_PER_CN88XX;
		break;
	}
}

static struct bgx *get_bgx(int node, int bgx_idx)
{
	int idx = (node * max_bgx_per_node) + bgx_idx;

	return bgx_vnic[idx];
}

/* Return a map of BGXes present in HW */
unsigned int bgx_get_map(int node)
{
	int i;
	unsigned int map = 0;

	for (i = 0; i < max_bgx_per_node; i++) {
		if (bgx_vnic[(node * max_bgx_per_node) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
{
	struct lmac *lmac = NULL;
	u8 idx = 0;

	lmac = &bgx->lmac[lmacid];
	/* reset CAM filters */
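	/* CAM entries are statically partitioned: each LMAC owns dmacs_count
	 * consecutive slots starting at (lmacid * dmacs_count).
	 */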
	for (idx = 0; idx < lmac->dmacs_count; idx++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
			      ((lmacid * lmac->dmacs_count) + idx) *
			      sizeof(u64), 0);
}

static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
{
	int i = 0;

	if (!lmac)
		return;

	/* We got a reset-filters request from one of the attached VFs, while
	 * the others might want to keep their configuration. So iterate over
	 * all configured filters and decrease the reference counts; if an
	 * address drops to zero references, remove it from the list.
	 */
	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
		if (!lmac->dmacs[i].vf_map) {
			lmac->dmacs_cfg--;
			lmac->dmacs[i].dmac = 0;
			lmac->dmacs[i].vf_map = 0;
		}
	}
}

static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
{
	u8 i = 0;

	if (!lmac)
		return -1;

	/* Several VFs could be 'attached' to a particular LMAC at the same
	 * time, and each VF is represented as a network interface to the
	 * kernel. So from the user's perspective it should be possible to
	 * manipulate each VF's receive mode. From the PF driver's
	 * perspective, however, we need to keep track of the filter
	 * configurations of the different VFs to prevent duplicate filter
	 * values.
	 */
	for (i = 0; i < lmac->dmacs_cfg; i++) {
		if (lmac->dmacs[i].dmac == dmac) {
			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
			return -1;
		}
	}

	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
		return -1;

	/* keep it for further tracking */
	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
	lmac->dmacs_cfg++;
	return 0;
}

static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
				       u64 cam_dmac, u8 idx)
{
	struct lmac *lmac = NULL;
	u64 cfg = 0;

	/* skip zero addresses as meaningless */
	if (!cam_dmac || !bgx)
		return -1;

	lmac = &bgx->lmac[lmacid];

	/* configure DCAM filtering for designated LMAC */
	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
	      RX_DMACX_CAM_EN | cam_dmac;
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
	return 0;
}

void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
			     u64 cam_dmac, u8 vf_id)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac = NULL;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	if (!cam_dmac)
		cam_dmac = ether_addr_to_u64(lmac->mac);

	/* Since we might have several VFs attached to a particular LMAC, and
	 * the kernel could call the mcast config for each of them with the
	 * same MAC, check whether the requested MAC is already in the
	 * filtering list and update/prepare the list of MACs to be applied
	 * to the HW filters later.
	 */
	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
}
EXPORT_SYMBOL(bgx_set_dmac_cam_filter);

void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac = NULL;
	u64 cfg = 0;
	u8 i = 0;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
	if (mode & BGX_XCAST_BCAST_ACCEPT)
		cfg |= BCAST_ACCEPT;
	else
		cfg &= ~BCAST_ACCEPT;

	/* disable all MCASTs and DMAC filtering */
	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));

	/* check requested bits and set filtering mode appropriately */
	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
	} else if (mode & BGX_XCAST_MCAST_FILTER) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
		for (i = 0; i < lmac->dmacs_cfg; i++)
			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
						    lmac->dmacs[i].dmac, i);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_set_xcast_mode);

void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
	bgx_flush_dmac_cam_filter(bgx, lmacid);
	bgx_set_xcast_mode(node, bgx_idx, lmacid,
			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
}
EXPORT_SYMBOL(bgx_reset_xcast_mode);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable) {
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;

		/* enable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
			       GMI_TXX_INT_UNDFLW);
	} else {
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);

		/* Disable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
			       GMI_TXX_INT_UNDFLW);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);

/* Enables or disables timestamp insertion by BGX for Rx packets */
void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 csr_offset, cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	if (lmac->lmac_type == BGX_MODE_SGMII ||
	    lmac->lmac_type == BGX_MODE_QSGMII ||
	    lmac->lmac_type == BGX_MODE_RGMII)
		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
	else
		csr_offset = BGX_SMUX_RX_FRM_CTL;

	cfg = bgx_reg_read(bgx, lmacid, csr_offset);

	if (enable)
		cfg |= BGX_PKT_RX_PTP_EN;
	else
		cfg &= ~BGX_PKT_RX_PTP_EN;
	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
}
EXPORT_SYMBOL(bgx_config_timestamping);

void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);

void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);

static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;
	bool tx_en, rx_en;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	tx_en = cmr_cfg & CMR_PKT_TX_EN;
	rx_en = cmr_cfg & CMR_PKT_RX_EN;
	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Restore CMR config settings */
	cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}

static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

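	/* Rx stat CSRs beyond index 8 are assumed to be common to the whole
	 * BGX rather than per-LMAC, so read them through LMAC0.
	 */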
	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where the PHY driver is not present or it is a
		 * non-standard PHY, firmware sets AN_EN to tell the Linux
		 * driver whether to do auto-negotiation and link polling.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}

static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

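	/* Advertise the matching backplane ability in SPUX_AN_ADV: bit 23
	 * for 10GBASE-KR, bit 24 for 40GBASE-KR4; the remaining cleared bits
	 * are taken to be other ability/pause fields (bit meanings inferred
	 * from the lmac_type checks below).
	 */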
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
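	/* The (0xffff << 32) field above is assumed to be the per-channel
	 * backpressure enable mask, asserted here for all 16 channels.
	 */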
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

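	/* Training handshake: SPUX_INT bit 13 is treated as "training done";
	 * if it is not set yet, ack bits 13/14 and restart training via
	 * PMD_CRTL bit 0 (bit meanings inferred from this sequence).
	 */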
	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}

static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/* Link state bit is sticky, read it again */
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = !!(pcs_link & PCS_MRX_STATUS_LINK);
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);

	speed = (an_result >> 3) & 0x3;
	lmac->last_duplex = (an_result >> 1) & 0x1;
	switch (speed) {
	case 0:
		lmac->last_speed = SPEED_10;
		break;
	case 1:
		lmac->last_speed = SPEED_100;
		break;
	case 2:
		lmac->last_speed = SPEED_1000;
		break;
	default:
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		break;
	}

next_poll:

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}

static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = true;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = SPEED_40000;
		else
			lmac->last_speed = SPEED_10000;
		lmac->last_duplex = DUPLEX_FULL;
	} else {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = false;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII_RXID;

	return PHY_INTERFACE_MODE_SGMII;
}

static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = true;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = false;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

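	/* The minimum Ethernet frame is 60 bytes before FCS: the GMI MIN_PKT
	 * register is assumed to count from zero (hence 60 - 1), while the
	 * SMU value includes the 4-byte FCS (hence 60 + 4).
	 */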
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* actual number of filters available to each LMAC */
	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
			      GFP_KERNEL);
	if (!lmac->dmacs)
		return -ENOMEM;

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
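	/* 0x03 == BCAST_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_ACCEPT): accept
	 * broadcast and all multicast until a specific mode is requested.
	 */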
1099*4882a593Smuzhiyun bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun if ((lmac->lmac_type != BGX_MODE_XFI) &&
1102*4882a593Smuzhiyun (lmac->lmac_type != BGX_MODE_XLAUI) &&
1103*4882a593Smuzhiyun (lmac->lmac_type != BGX_MODE_40G_KR) &&
1104*4882a593Smuzhiyun (lmac->lmac_type != BGX_MODE_10G_KR)) {
1105*4882a593Smuzhiyun if (!lmac->phydev) {
1106*4882a593Smuzhiyun if (lmac->autoneg) {
1107*4882a593Smuzhiyun bgx_reg_write(bgx, lmacid,
1108*4882a593Smuzhiyun BGX_GMP_PCS_LINKX_TIMER,
1109*4882a593Smuzhiyun PCS_LINKX_TIMER_COUNT);
1110*4882a593Smuzhiyun goto poll;
1111*4882a593Smuzhiyun } else {
1112*4882a593Smuzhiyun /* Default to below link speed and duplex */
1113*4882a593Smuzhiyun lmac->link_up = true;
1114*4882a593Smuzhiyun lmac->last_speed = SPEED_1000;
1115*4882a593Smuzhiyun lmac->last_duplex = DUPLEX_FULL;
1116*4882a593Smuzhiyun bgx_sgmii_change_link_state(lmac);
1117*4882a593Smuzhiyun return 0;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun lmac->phydev->dev_flags = 0;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun if (phy_connect_direct(&lmac->netdev, lmac->phydev,
1123*4882a593Smuzhiyun bgx_lmac_handler,
1124*4882a593Smuzhiyun phy_interface_mode(lmac->lmac_type)))
1125*4882a593Smuzhiyun return -ENODEV;
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun phy_start(lmac->phydev);
1128*4882a593Smuzhiyun return 0;
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun poll:
1132*4882a593Smuzhiyun lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
1133*4882a593Smuzhiyun WQ_MEM_RECLAIM, 1);
1134*4882a593Smuzhiyun if (!lmac->check_link)
1135*4882a593Smuzhiyun return -ENOMEM;
1136*4882a593Smuzhiyun INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
1137*4882a593Smuzhiyun queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun return 0;
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun
bgx_lmac_disable(struct bgx * bgx,u8 lmacid)1142*4882a593Smuzhiyun static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun struct lmac *lmac;
1145*4882a593Smuzhiyun u64 cfg;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun lmac = &bgx->lmac[lmacid];
1148*4882a593Smuzhiyun if (lmac->check_link) {
1149*4882a593Smuzhiyun /* Destroy work queue */
1150*4882a593Smuzhiyun cancel_delayed_work_sync(&lmac->dwork);
1151*4882a593Smuzhiyun destroy_workqueue(lmac->check_link);
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun /* Disable packet reception */
1155*4882a593Smuzhiyun cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
1156*4882a593Smuzhiyun cfg &= ~CMR_PKT_RX_EN;
1157*4882a593Smuzhiyun bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun /* Give chance for Rx/Tx FIFO to get drained */
1160*4882a593Smuzhiyun bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
1161*4882a593Smuzhiyun bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_cam_filter(bgx, lmacid);
	kfree(lmac->dmacs);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));
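	/* Each LMAC owns a contiguous window of MAX_BGX_CHANS_PER_LMAC
	 * channels; the expression above builds an all-ones mask of that
	 * width and shifts it into LMAC i's window of the AND mask.
	 */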

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}

static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}

static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[27];

	if (!bgx->is_dlm && lmacid)
		return;

	lmac = &bgx->lmac[lmacid];
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", str);
		else
			dev_info(dev, "%s: 10G_KR\n", str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", str);
		else
			dev_info(dev, "%s: 40G_KR4\n", str);
		break;
	case BGX_MODE_QSGMII:
		dev_info(dev, "%s: QSGMII\n", str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}

static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine whether DLM0/2 or DLM1/3
		 * is configured as QSGMII, since the bootloader configures
		 * all LMACs, so take whatever the low-level firmware has
		 * configured.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}
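/* lane_to_sds is a packed byte, two bits per LMAC lane selecting a serdes
 * lane (encoding inferred from the values used above): 0xE4 (0b11100100)
 * is the identity map, while for RXAUI 0x4 pairs serdes lanes 0/1 and 0xE
 * pairs lanes 2/3.
 */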

static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = false;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}

static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out the QLM mode;
		 * this is configured by low-level firmware.
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* For DLMs or SLMs on 80/81/83xx, many lane configurations are
	 * possible and they vary across boards. The kernel has no way to
	 * identify the board type/info, but firmware does, so take the
	 * lmac type and serdes lane config as is.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
	lane_to_sds = (u8)(cmr_cfg & 0xFF);
	/* Check if config is the reset value */
	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
		lmac->lmac_type = BGX_MODE_INVALID;
	else
		lmac->lmac_type = lmac_type;
	lmac->lane_to_sds = lane_to_sds;
	lmac_set_training(bgx, lmac, lmac->lmacid);
}
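/* An lmac_type of 0 together with a lane_to_sds of 0xE4 matches the reset
 * value of BGX_CMRX_CFG, so it is treated above as "firmware left this
 * LMAC unconfigured" rather than as a real SGMII setup with an identity
 * lane map.
 */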

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	u8 idx;

	/* Initialize every LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low-level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->lmac_count; idx++) {
		bgx_set_lmac_config(bgx, idx);
		bgx_print_qlm_mode(bgx, idx);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	u8 *addr;

	addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
	if (!addr) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		return -EINVAL;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	ether_addr_copy(dst, mac);
	return 0;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4)) {
		kfree(string.pointer);
		return AE_OK;
	}

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}
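/* acpi_get_devices() below visits every ACPI device; bgx_acpi_match_id()
 * skips nodes until it finds the one named "BGX<n>" for this instance,
 * walks that node's children one level deep to register each LMAC, and
 * then returns AE_CTRL_TERMINATE to stop the outer scan.
 */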

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (!IS_ERR(mac))
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no PHY, or if defective firmware presents
		 * a Cortina PHY for which there is no driver support,
		 * ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the PHY drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out; try not to leak device reference counts
	 * for PHY devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}
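/* Returning -EPROBE_DEFER asks the driver core to retry bgx_probe() later,
 * once the PHY driver may have bound. The unwind loop above drops the mdio
 * device references taken by of_phy_find_device() so nothing leaks across
 * retries.
 */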

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static irqreturn_t bgx_intr_handler(int irq, void *data)
{
	struct bgx *bgx = (struct bgx *)data;
	u64 status, val;
	int lmac;

	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
		if (status & GMI_TXX_INT_UNDFLW) {
			pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
				bgx->bgx_id, lmac);
			val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
			val &= ~CMR_EN;
			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
			val |= CMR_EN;
			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
		}
		/* clear interrupts */
		bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
	}

	return IRQ_HANDLED;
}
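/* Underflow recovery above toggles CMR_EN off and back on to reset the
 * LMAC data path; writing the read status back to BGX_GMP_GMI_TXX_INT
 * acknowledges the pending bits (write-one-to-clear semantics, as implied
 * by writing the status value back).
 */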

static void bgx_register_intr(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
				    BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		pci_err(pdev, "Req for #%d msix vectors failed\n",
			BGX_LMAC_VEC_OFFSET);
		return;
	}
	ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
			      bgx, "BGX%d", bgx->bgx_id);
	if (ret)
		pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
}
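/* Exactly BGX_LMAC_VEC_OFFSET vectors are allocated (min and max are
 * equal), and only the GMPX_GMI_TX_INT vector is requested here, with a
 * hard-IRQ handler (the threaded-handler argument is NULL). Failure is
 * non-fatal: the function returns void and the driver simply runs without
 * this interrupt.
 */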

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	set_max_bgx_per_node(pdev);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all BGXes are DLMs, while on 83xx there are three BGX
	 * QLMs and one BGX (BGX2) that can be split across two DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	bgx_register_intr(pdev);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);