// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

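/* Sync the switch port membership registers of all other bridged ports
 * (except this port and the CPU port) that are currently forwarding, so
 * they match the current bridge member mask.
 */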
void ksz_update_port_member(struct ksz_device *dev, int port)
{
        struct ksz_port *p;
        int i;

        for (i = 0; i < dev->port_cnt; i++) {
                if (i == port || i == dev->cpu_port)
                        continue;
                p = &dev->ports[i];
                if (!(dev->member & (1 << i)))
                        continue;

                /* Port is a member of the bridge and is forwarding. */
                if (p->stp_state == BR_STATE_FORWARDING &&
                    p->member != dev->member)
                        dev->dev_ops->cfg_port_member(dev, i, dev->member);
        }
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

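/* Read this port's hardware MIB counters into the software mirror.
 * Counters below reg_mib_cnt are plain counters; the remaining ones are
 * packet counters that also accumulate into the shared "dropped" slot at
 * the end of the storage array.
 */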
static void port_r_cnt(struct ksz_device *dev, int port)
{
        struct ksz_port_mib *mib = &dev->ports[port].mib;
        u64 *dropped;

        /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
        while (mib->cnt_ptr < dev->reg_mib_cnt) {
                dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
                                        &mib->counters[mib->cnt_ptr]);
                ++mib->cnt_ptr;
        }

        /* last one in storage */
        dropped = &mib->counters[dev->mib_cnt];

        /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
        while (mib->cnt_ptr < dev->mib_cnt) {
                dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
                                        dropped, &mib->counters[mib->cnt_ptr]);
                ++mib->cnt_ptr;
        }
        mib->cnt_ptr = 0;
}

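/* Periodic worker: refresh the MIB counters of every used port under the
 * per-port counter mutex, then re-arm itself.
 */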
static void ksz_mib_read_work(struct work_struct *work)
{
        struct ksz_device *dev = container_of(work, struct ksz_device,
                                              mib_read.work);
        struct ksz_port_mib *mib;
        struct ksz_port *p;
        int i;

        for (i = 0; i < dev->mib_port_cnt; i++) {
                if (dsa_is_unused_port(dev->ds, i))
                        continue;

                p = &dev->ports[i];
                mib = &p->mib;
                mutex_lock(&mib->cnt_mutex);

                /* Only read MIB counters when the port is told to do so.
                 * Otherwise read only the dropped counters when the link is
                 * not up.
                 */
                if (!p->read) {
                        const struct dsa_port *dp = dsa_to_port(dev->ds, i);

                        if (!netif_carrier_ok(dp->slave))
                                mib->cnt_ptr = dev->reg_mib_cnt;
                }
                port_r_cnt(dev, i);
                p->read = false;
                mutex_unlock(&mib->cnt_mutex);
        }

        schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}

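/* Prepare the deferred MIB work and reset the counters of every port. */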
void ksz_init_mib_timer(struct ksz_device *dev)
{
        int i;

        INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);

        for (i = 0; i < dev->mib_port_cnt; i++)
                dev->dev_ops->port_init_cnt(dev, i);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
        struct ksz_device *dev = ds->priv;
        u16 val = 0xffff;

        dev->dev_ops->r_phy(dev, addr, reg, &val);

        return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
        struct ksz_device *dev = ds->priv;

        dev->dev_ops->w_phy(dev, addr, reg, val);

        return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

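/* On link down, flag the port so the next MIB pass reads all of its
 * counters, and kick the deferred work immediately if the periodic read
 * has been started.
 */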
void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
                       phy_interface_t interface)
{
        struct ksz_device *dev = ds->priv;
        struct ksz_port *p = &dev->ports[port];

        /* Read all MIB counters when the link is going down. */
        p->read = true;
        /* timer started */
        if (dev->mib_read_interval)
                schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
        struct ksz_device *dev = ds->priv;

        if (sset != ETH_SS_STATS)
                return 0;

        return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

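/* Refresh and copy the cached MIB counters for ethtool; when the link is
 * down only the dropped counters are read from hardware.
 */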
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
        const struct dsa_port *dp = dsa_to_port(ds, port);
        struct ksz_device *dev = ds->priv;
        struct ksz_port_mib *mib;

        mib = &dev->ports[port].mib;
        mutex_lock(&mib->cnt_mutex);

        /* Only read dropped counters if no link. */
        if (!netif_carrier_ok(dp->slave))
                mib->cnt_ptr = dev->reg_mib_cnt;
        port_r_cnt(dev, port);
        memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
        mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

int ksz_port_bridge_join(struct dsa_switch *ds, int port,
                         struct net_device *br)
{
        struct ksz_device *dev = ds->priv;

        mutex_lock(&dev->dev_mutex);
        dev->br_member |= (1 << port);
        mutex_unlock(&dev->dev_mutex);

        /* port_stp_state_set() will be called afterwards to put the port in
         * the appropriate state, so there is no need to do anything here.
         */

        return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
                           struct net_device *br)
{
        struct ksz_device *dev = ds->priv;

        mutex_lock(&dev->dev_mutex);
        dev->br_member &= ~(1 << port);
        dev->member &= ~(1 << port);
        mutex_unlock(&dev->dev_mutex);

        /* port_stp_state_set() will be called afterwards to put the port in
         * forwarding state, so there is no need to do anything here.
         */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
        struct ksz_device *dev = ds->priv;

        dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
                          const struct switchdev_obj_port_vlan *vlan)
{
        /* nothing needed */

        return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);

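/* Walk the dynamic MAC table and report every entry learned on this port
 * through the DSA dump callback.
 */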
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
                      void *data)
{
        struct ksz_device *dev = ds->priv;
        int ret = 0;
        u16 i = 0;
        u16 entries = 0;
        u8 timestamp = 0;
        u8 fid;
        u8 member;
        struct alu_struct alu;

        do {
                alu.is_static = false;
                ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
                                                    &member, &timestamp,
                                                    &entries);
                if (!ret && (member & BIT(port))) {
                        ret = cb(alu.mac, alu.fid, alu.is_static, data);
                        if (ret)
                                break;
                }
                i++;
        } while (i < entries);
        if (i >= entries)
                ret = 0;

        return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
                         const struct switchdev_obj_port_mdb *mdb)
{
        /* nothing to do */
        return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);

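/* Add the port to a static MAC table entry for the multicast address,
 * reusing a matching entry if one exists or the first free slot otherwise.
 */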
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
                      const struct switchdev_obj_port_mdb *mdb)
{
        struct ksz_device *dev = ds->priv;
        struct alu_struct alu;
        int index;
        int empty = 0;

        alu.port_forward = 0;
        for (index = 0; index < dev->num_statics; index++) {
                if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
                        /* Found one already in static MAC table. */
                        if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
                            alu.fid == mdb->vid)
                                break;
                /* Remember the first empty entry. */
                } else if (!empty) {
                        empty = index + 1;
                }
        }

        /* no available entry */
        if (index == dev->num_statics && !empty)
                return;

        /* add entry */
        if (index == dev->num_statics) {
                index = empty - 1;
                memset(&alu, 0, sizeof(alu));
                memcpy(alu.mac, mdb->addr, ETH_ALEN);
                alu.is_static = true;
        }
        alu.port_forward |= BIT(port);
        if (mdb->vid) {
                alu.is_use_fid = true;

                /* Need a way to map VID to FID. */
                alu.fid = mdb->vid;
        }
        dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

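/* Remove the port from the matching static MAC table entry; the entry is
 * dropped from the static table once no port forwards to it any more.
 */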
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
                     const struct switchdev_obj_port_mdb *mdb)
{
        struct ksz_device *dev = ds->priv;
        struct alu_struct alu;
        int index;
        int ret = 0;

        for (index = 0; index < dev->num_statics; index++) {
                if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
                        /* Found one already in static MAC table. */
                        if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
                            alu.fid == mdb->vid)
                                break;
                }
        }

        /* no matching entry */
        if (index == dev->num_statics)
                goto exit;

        /* clear port */
        alu.port_forward &= ~BIT(port);
        if (!alu.port_forward)
                alu.is_static = false;
        dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
        return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
        struct ksz_device *dev = ds->priv;

        if (!dsa_is_user_port(ds, port))
                return 0;

        /* setup slave port */
        dev->dev_ops->port_setup(dev, port, false);

        /* port_stp_state_set() will be called afterwards to enable the port,
         * so there is no need to do anything here.
         */

        return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

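/* Allocate the DSA switch structure and the driver's private state and wire
 * them together; "priv" is the caller's bus-private pointer (typically the
 * SPI or I2C device).
 */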
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
        struct dsa_switch *ds;
        struct ksz_device *swdev;

        ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        ds->dev = base;
        ds->num_ports = DSA_MAX_PORTS;

        swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
        if (!swdev)
                return NULL;

        ds->priv = swdev;
        swdev->dev = base;

        swdev->ds = ds;
        swdev->priv = priv;

        return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

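/* Common registration path: pulse the optional reset GPIO, detect and
 * initialize the chip, parse the per-port PHY modes and properties from the
 * device tree, register with DSA and start the periodic MIB read.
 */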
int ksz_switch_register(struct ksz_device *dev,
                        const struct ksz_dev_ops *ops)
{
        struct device_node *port, *ports;
        phy_interface_t interface;
        unsigned int port_num;
        int ret;

        if (dev->pdata)
                dev->chip_id = dev->pdata->chip_id;

        dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
                                                  GPIOD_OUT_LOW);
        if (IS_ERR(dev->reset_gpio))
                return PTR_ERR(dev->reset_gpio);

        if (dev->reset_gpio) {
                gpiod_set_value_cansleep(dev->reset_gpio, 1);
                usleep_range(10000, 12000);
                gpiod_set_value_cansleep(dev->reset_gpio, 0);
                msleep(100);
        }

        mutex_init(&dev->dev_mutex);
        mutex_init(&dev->regmap_mutex);
        mutex_init(&dev->alu_mutex);
        mutex_init(&dev->vlan_mutex);

        dev->dev_ops = ops;

        if (dev->dev_ops->detect(dev))
                return -EINVAL;

        ret = dev->dev_ops->init(dev);
        if (ret)
                return ret;

        /* The host port interface is auto-detected or explicitly set in the
         * device tree.
         */
        for (port_num = 0; port_num < dev->port_cnt; ++port_num)
                dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
        if (dev->dev->of_node) {
                ret = of_get_phy_mode(dev->dev->of_node, &interface);
                if (ret == 0)
                        dev->compat_interface = interface;
                ports = of_get_child_by_name(dev->dev->of_node, "ports");
                if (ports)
                        for_each_available_child_of_node(ports, port) {
                                if (of_property_read_u32(port, "reg",
                                                         &port_num))
                                        continue;
                                if (port_num >= dev->mib_port_cnt)
                                        return -EINVAL;
                                of_get_phy_mode(port,
                                                &dev->ports[port_num].interface);
                        }
                dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
                                                         "microchip,synclko-125");
        }

        ret = dsa_register_switch(dev->ds);
        if (ret) {
                dev->dev_ops->exit(dev);
                return ret;
        }

        /* Read MIB counters every 30 seconds to avoid overflow. */
        dev->mib_read_interval = msecs_to_jiffies(30000);

        /* Start the MIB timer. */
        schedule_delayed_work(&dev->mib_read, 0);

        return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

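/* Tear down: stop the MIB work, run the chip-specific exit hook, unregister
 * from DSA and hold the switch in reset if a reset GPIO is present.
 */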
void ksz_switch_remove(struct ksz_device *dev)
{
        /* timer started */
        if (dev->mib_read_interval) {
                dev->mib_read_interval = 0;
                cancel_delayed_work_sync(&dev->mib_read);
        }

        dev->dev_ops->exit(dev);
        dsa_unregister_switch(dev->ds);

        if (dev->reset_gpio)
                gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");