// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on link speed, using the
 * recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}

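/* Put the port in promiscuous mode and drop the static fdb addresses
 * that were synced to its unicast filter; they are redundant once the
 * port receives all traffic anyway.
 */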
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

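/* Set or clear the backup port of @p; a NULL backup_dev clears the
 * current backup link.  Expects RTNL to be held (see ASSERT_RTNL).
 */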
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!netif_is_bridge_port(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (p == backup_p)
		return -EINVAL;

	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}

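/* Remove @p's own backup link and any backup links on other ports
 * that point at @p, then sanity-check that nothing is left over.
 */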
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

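/* Recount the "auto" ports (see br_auto_port()) and re-evaluate
 * promiscuity on all ports if the count changed.
 */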
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

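/* kobject release callback: frees the port once its last sysfs
 * reference is gone.
 */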
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};

static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

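/* Return the largest forwarding headroom required by any port device. */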
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

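/* Push a new rx headroom to every port and record it as the bridge
 * device's own needed_headroom.
 */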
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from a bridge is done in two steps
 * via RCU.  The first step marks the device as down, which deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_mrp_port_del(br, p);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	bitmap_free(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

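/* Allocate and register a new bridge device. */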
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

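/* Look up a bridge by name and delete it, provided it really is a
 * bridge (IFF_EBRIDGE) and is already down.
 */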
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete a non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* if the bridge MTU was manually configured don't mess with it */
	if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}

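/* Clamp the bridge device's GSO limits to the most restrictive values
 * among its ports.
 */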
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge's feature set from its slaves' features
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr, fdb_synced = false;

	/* Don't allow bridging non-Ethernet-like devices. */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	/* Also don't allow bridging of net devices that are DSA masters, since
	 * the bridge layer rx_handler prevents the DSA fake ethertype handler
	 * from being invoked, so we don't get the chance to strip off and
	 * parse the DSA switch tag protocol header (the bridge layer just
	 * returns RX_HANDLER_CONSUMED, stopping RX processing for these
	 * frames).
	 * The only case where that would not be an issue is when bridging can
	 * already be offloaded, such as when the DSA master is itself a DSA
	 * or plain switchdev port, and is bridged only with other ports from
	 * the same hardware device.
	 */
	if (netdev_uses_dsa(dev)) {
		list_for_each_entry(p, &br->port_list, list) {
			if (!netdev_port_same_parent_id(dev, p->dev)) {
				NL_SET_ERR_MSG(extack,
					       "Cannot do software bridging with a DSA master");
				return -EINVAL;
			}
		}
	}

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Cannot enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		br_multicast_del_port(p);
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);
	if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
		/* When updating the port count we also update all ports'
		 * promiscuous mode.
		 * A port leaving promiscuous mode normally gets the bridge's
		 * fdb synced to the unicast filter (if supported), however,
		 * `br_port_clear_promisc` does not distinguish between
		 * non-promiscuous ports and *new* ports, so we need to
		 * sync explicitly here.
		 */
		fdb_synced = br_fdb_sync_static(br, p) == 0;
		if (!fdb_synced)
			netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
	}

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed to insert local address into bridge forwarding table\n");

	if (br->dev->addr_assign_type != NET_ADDR_SET) {
		/* Ask for permission to use this MAC address now, even if we
		 * don't end up choosing it below.
		 */
		err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
		if (err)
			goto err7;
	}

	err = nbp_vlan_init(p, extack);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	if (fdb_synced)
		br_fdb_unsync_static(br, p);
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	br_multicast_del_port(p);
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
err1:
	dev_put(dev);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

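/* Called after a port's flags were changed: re-evaluate the settings
 * derived from them (auto port count/promiscuity, neigh suppression).
 */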
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}

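/* Check whether @dev is a bridge port with the given BR_* @flag set. */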
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
	struct net_bridge_port *p;

	p = br_port_get_rtnl_rcu(dev);
	if (!p)
		return false;

	return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);