// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

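/* Get the bridge port that matches the given ifindex, or NULL if the bridge
 * has no such port.
 */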
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

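/* Get the MRP instance that has the given ring_id, or NULL. */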
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

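/* Get the MRP instance that has the given interconnect id, or NULL. */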
static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

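/* Check that the given ifindex is not already used as a ring or interconnect
 * port by any MRP instance on this bridge.
 */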
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

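/* Get the MRP instance that uses the given port as a ring or interconnect
 * port, or NULL.
 */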
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

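/* Get the next sequence id, carried in the MRP_Common TLV of each generated
 * frame.
 */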
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

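/* Allocate an MRP frame for the given port: fill in the Ethernet header with
 * the given source and destination addresses and the MRP ethertype, and add
 * the 16-bit MRP version field. The caller appends the TLVs.
 */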
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

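/* Append a TLV header with the given type and length to the frame. */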
static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

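/* Append the MRP_Common TLV, which carries the sequence id and the default
 * domain UUID (all 0xff).
 */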
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

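/* Build an MRP_Test frame to be sent on the given ring port. The frame
 * carries the node's priority, the bridge MAC as source address, the port
 * role, the current ring state, the number of ring transitions and a
 * timestamp.
 */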
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

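/* Build an MRP_InTest frame to be sent on the given port. The frame carries
 * the interconnect id, the bridge MAC as source address, the port role, the
 * current interconnect state, the number of interconnect transitions and a
 * timestamp.
 */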
static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

/* This function is continuously called in the following cases:
 * - when node role is MRM, in this case test_monitor is always set to false
 *   because it needs to notify the userspace that the ring is open and needs
 *   to send MRP_Test frames
 * - when node role is MRA, there are 2 subcases:
 *     - when MRA behaves as MRM, in this case it is similar to the MRM role
 *     - when MRA behaves as MRC, in this case test_monitor is set to true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * role MRA and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * the role of MRM.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

/* This function is continuously called when the node has the interconnect
 * role MIM. It generates interconnect test frames and sends them on all 3
 * ports, but it also checks whether it has stopped receiving interconnect
 * test frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}

/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of a MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
 * MRC(Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not do those operations
	 * anymore. For example if the role is MRM then the HW will notify the
	 * SW when the ring is open, but if the role is not pushed to the HW
	 * the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate or monitor MRP test frames, the frames are generated by
 * HW and if it fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period,
					     test->monitor))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

/* Set in state, in state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

/* Set in role, in role can be only MIM(Media Interconnection Manager) or
 * MIC(Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	struct net_bridge_port *p;
	int err;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance has already one. First it needs to be disabled and after
	 * that set the new port
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					   role->ring_id, role->in_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not do those operations
	 * anymore. For example if the role is MIM then the HW will notify the
	 * SW when the interconnect ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the interconnect
	 * ring is open.
	 */
	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate MRP_InTest frames, the frames are generated by
 * HW and if it fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
					   in_test->max_miss, in_test->period))
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}

/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}

/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring is
	 * not closed
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}

/* Determine if the test hdr has a better priority than the node */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	    ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application
 * note: already called with rcu_read_lock
 */
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than the node will
	 * clear the miss counter because otherwise the node will need to
	 * behave as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}

/* Process only MRP InTest frames. All the other MRP frames are processed by
 * the userspace application
 * note: already called with rcu_read_lock
 */
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames. */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring is
	 * not closed
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}

/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}

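/* A node behaves as MRM if it has the role MRM, or if it has the role MRA
 * and is not just monitoring MRP_Test frames.
 */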
static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

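/* A node behaves as MRC if it has the role MRC, or if it has the role MRA
 * and is only monitoring MRP_Test frames.
 */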
static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}

/* This will just forward the frame to the other mrp ring ports, depending on
 * the frame type, ring role and interconnect role
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as MRM node
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in the blocking state it should not
		 * forward In_Test frames
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* Nodes that behave as MRM need to stop forwarding the
		 * frames in case the ring is closed, otherwise there will be
		 * a loop. In this case the frame is not forwarded between the
		 * ring ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an interconnect
		 * role should forward all frames between the ring ports
		 * because it doesn't have an interconnect port
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange and
				 * IntTopoChange between ring ports but MIM
				 * should not forward IntLinkChange and
				 * IntTopoChange if the frame was received at
				 * the interconnect port
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the received port
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames only if they
			 * are received on ring ports to all the ports
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* Should forward the InTopo frames only between the
			 * ring ports
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}

/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has the MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
		return br_mrp_rcv(p, skb, p->dev);

out:
	return 0;
}

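/* Return true if the bridge has at least one MRP instance. */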
bool br_mrp_enabled(struct net_bridge *br)
{
	return !list_empty(&br->mrp_list);
}