1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright 2011-2014 Autronica Fire and Security AS
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Author(s):
5*4882a593Smuzhiyun * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Frame router for HSR and PRP.
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include "hsr_forward.h"
11*4882a593Smuzhiyun #include <linux/types.h>
12*4882a593Smuzhiyun #include <linux/skbuff.h>
13*4882a593Smuzhiyun #include <linux/etherdevice.h>
14*4882a593Smuzhiyun #include <linux/if_vlan.h>
15*4882a593Smuzhiyun #include "hsr_main.h"
16*4882a593Smuzhiyun #include "hsr_framereg.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun struct hsr_node;
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /* The uses I can see for these HSR supervision frames are:
21*4882a593Smuzhiyun * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
22*4882a593Smuzhiyun * 22") to reset any sequence_nr counters belonging to that node. Useful if
23*4882a593Smuzhiyun * the other node's counter has been reset for some reason.
24*4882a593Smuzhiyun * --
25*4882a593Smuzhiyun * Or not - resetting the counter and bridging the frame would create a
26*4882a593Smuzhiyun * loop, unfortunately.
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
29*4882a593Smuzhiyun * frame is received from a particular node, we know something is wrong.
30*4882a593Smuzhiyun * We just register these (as with normal frames) and throw them away.
31*4882a593Smuzhiyun *
32*4882a593Smuzhiyun * 3) Allow different MAC addresses for the two slave interfaces, using the
33*4882a593Smuzhiyun * MacAddressA field.
34*4882a593Smuzhiyun */
/* Return true if skb is a well-formed HSR/PRP supervision frame: destination
 * must be the configured supervision multicast address, the ethertype must be
 * ETH_P_HSR or ETH_P_PRP, the TLV type must be one of the announce/life-check
 * values, and the TLV length must match. Caller must have set the mac header.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		/* HSRv1: supervision payload sits behind the HSR tag, whose
		 * encapsulated protocol must be ETH_P_PRP.
		 */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		/* HSRv0/PRP: supervision payload directly follows the
		 * Ethernet header.
		 */
		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	/* Accept both the fixed length 12 (presumably used by older
	 * implementations — see IEC 62439-3) and the current payload size.
	 */
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}
77*4882a593Smuzhiyun
/* Create a copy of skb_in with the HSR tag removed, i.e. a standard Ethernet
 * frame. Returns NULL on allocation failure. skb_in itself is left unchanged
 * (the temporary pull is undone before returning).
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily pull HSR_HLEN so the copy excludes the tag; the push
	 * below restores skb_in regardless of whether the copy succeeded.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	/* csum_start was computed relative to the tagged header layout */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Copy the Ethernet addresses (and VLAN tag, if any) down so they
	 * directly precede the encapsulated payload in the stripped frame.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
106*4882a593Smuzhiyun
hsr_get_untagged_frame(struct hsr_frame_info * frame,struct hsr_port * port)107*4882a593Smuzhiyun struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
108*4882a593Smuzhiyun struct hsr_port *port)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun if (!frame->skb_std) {
111*4882a593Smuzhiyun if (frame->skb_hsr)
112*4882a593Smuzhiyun frame->skb_std =
113*4882a593Smuzhiyun create_stripped_skb_hsr(frame->skb_hsr, frame);
114*4882a593Smuzhiyun else
115*4882a593Smuzhiyun netdev_warn_once(port->dev,
116*4882a593Smuzhiyun "Unexpected frame received in hsr_get_untagged_frame()\n");
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun if (!frame->skb_std)
119*4882a593Smuzhiyun return NULL;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun return skb_clone(frame->skb_std, GFP_ATOMIC);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
prp_get_untagged_frame(struct hsr_frame_info * frame,struct hsr_port * port)125*4882a593Smuzhiyun struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
126*4882a593Smuzhiyun struct hsr_port *port)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun if (!frame->skb_std) {
129*4882a593Smuzhiyun if (frame->skb_prp) {
130*4882a593Smuzhiyun /* trim the skb by len - HSR_HLEN to exclude RCT */
131*4882a593Smuzhiyun skb_trim(frame->skb_prp,
132*4882a593Smuzhiyun frame->skb_prp->len - HSR_HLEN);
133*4882a593Smuzhiyun frame->skb_std =
134*4882a593Smuzhiyun __pskb_copy(frame->skb_prp,
135*4882a593Smuzhiyun skb_headroom(frame->skb_prp),
136*4882a593Smuzhiyun GFP_ATOMIC);
137*4882a593Smuzhiyun } else {
138*4882a593Smuzhiyun /* Unexpected */
139*4882a593Smuzhiyun WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
140*4882a593Smuzhiyun __FILE__, __LINE__, port->dev->name);
141*4882a593Smuzhiyun return NULL;
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun return skb_clone(frame->skb_std, GFP_ATOMIC);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
prp_set_lan_id(struct prp_rct * trailer,struct hsr_port * port)148*4882a593Smuzhiyun static void prp_set_lan_id(struct prp_rct *trailer,
149*4882a593Smuzhiyun struct hsr_port *port)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun int lane_id;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (port->type == HSR_PT_SLAVE_A)
154*4882a593Smuzhiyun lane_id = 0;
155*4882a593Smuzhiyun else
156*4882a593Smuzhiyun lane_id = 1;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun /* Add net_id in the upper 3 bits of lane_id */
159*4882a593Smuzhiyun lane_id |= port->hsr->net_id;
160*4882a593Smuzhiyun set_prp_lan_id(trailer, lane_id);
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun
/* Tailroom for PRP rct should have been created before calling this.
 * Pads the frame to the minimum Ethernet size, appends the HSR_HLEN-byte
 * RCT and fills in LAN id, LSDU size, sequence number and PRP suffix.
 * Returns skb on success, NULL if padding failed (skb is freed then).
 */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	/* Tolerate NULL so callers can chain this after a copy that may
	 * have failed.
	 */
	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	/* skb_put_padto() frees the skb on error */
	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	/* LSDU size = frame length minus the 14-byte Ethernet header (and
	 * the 4-byte VLAN tag, if present); the RCT just appended counts
	 * towards it.
	 */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);

	return skb;
}
192*4882a593Smuzhiyun
hsr_set_path_id(struct hsr_ethhdr * hsr_ethhdr,struct hsr_port * port)193*4882a593Smuzhiyun static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
194*4882a593Smuzhiyun struct hsr_port *port)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun int path_id;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun if (port->type == HSR_PT_SLAVE_A)
199*4882a593Smuzhiyun path_id = 0;
200*4882a593Smuzhiyun else
201*4882a593Smuzhiyun path_id = 1;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun
/* Fill in the HSR tag of an skb whose headers have already been shifted to
 * make room for it. The original ethertype moves into the tag's encap_proto
 * and the outer ethertype becomes ETH_P_HSR (v1) or ETH_P_PRP (v0).
 * Returns NULL if padding fails; skb_put_padto() frees the skb then.
 */
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	/* LSDU size = frame length minus the 14-byte Ethernet header (and
	 * the 4-byte VLAN tag, if present).
	 */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	/* Encapsulate the original protocol, then claim the outer ethertype */
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);

	return skb;
}
232*4882a593Smuzhiyun
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 * Returns NULL on allocation/padding failure.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	/* The headers are about to move up by HSR_HLEN; keep the partial
	 * checksum start in sync.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Shift the Ethernet header (and VLAN tag, if any) up by HSR_HLEN
	 * to open a gap for the HSR tag right after the addresses.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
276*4882a593Smuzhiyun
prp_create_tagged_frame(struct hsr_frame_info * frame,struct hsr_port * port)277*4882a593Smuzhiyun struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
278*4882a593Smuzhiyun struct hsr_port *port)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun struct sk_buff *skb;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun if (frame->skb_prp) {
283*4882a593Smuzhiyun struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun if (trailer) {
286*4882a593Smuzhiyun prp_set_lan_id(trailer, port);
287*4882a593Smuzhiyun } else {
288*4882a593Smuzhiyun WARN_ONCE(!trailer, "errored PRP skb");
289*4882a593Smuzhiyun return NULL;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun return skb_clone(frame->skb_prp, GFP_ATOMIC);
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun skb = skb_copy_expand(frame->skb_std, 0,
295*4882a593Smuzhiyun skb_tailroom(frame->skb_std) + HSR_HLEN,
296*4882a593Smuzhiyun GFP_ATOMIC);
297*4882a593Smuzhiyun prp_fill_rct(skb, frame, port);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun return skb;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
/* Deliver a frame up the local stack via the master (hsr) device and update
 * its rx statistics. Ownership of skb passes to netif_rx().
 */
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	/* Capture everything needed for the stats before netif_rx(), which
	 * takes ownership of (and may free) the skb.
	 */
	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}
322*4882a593Smuzhiyun
/* Transmit skb out through the given slave port. Frames that originated
 * locally (received on the master port) get address substitution first.
 * Returns the dev_queue_xmit() result.
 */
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}
336*4882a593Smuzhiyun
prp_drop_frame(struct hsr_frame_info * frame,struct hsr_port * port)337*4882a593Smuzhiyun bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
340*4882a593Smuzhiyun port->type == HSR_PT_SLAVE_B) ||
341*4882a593Smuzhiyun (frame->port_rcv->type == HSR_PT_SLAVE_B &&
342*4882a593Smuzhiyun port->type == HSR_PT_SLAVE_A));
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - if it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames are handed to the supervision handler
		 * at the master port instead of being delivered/forwarded.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		/* Slave ports get a tagged copy; the master gets the frame
		 * with the tag stripped.
		 */
		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}
413*4882a593Smuzhiyun
check_local_dest(struct hsr_priv * hsr,struct sk_buff * skb,struct hsr_frame_info * frame)414*4882a593Smuzhiyun static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
415*4882a593Smuzhiyun struct hsr_frame_info *frame)
416*4882a593Smuzhiyun {
417*4882a593Smuzhiyun if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
418*4882a593Smuzhiyun frame->is_local_exclusive = true;
419*4882a593Smuzhiyun skb->pkt_type = PACKET_HOST;
420*4882a593Smuzhiyun } else {
421*4882a593Smuzhiyun frame->is_local_exclusive = false;
422*4882a593Smuzhiyun }
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun if (skb->pkt_type == PACKET_HOST ||
425*4882a593Smuzhiyun skb->pkt_type == PACKET_MULTICAST ||
426*4882a593Smuzhiyun skb->pkt_type == PACKET_BROADCAST) {
427*4882a593Smuzhiyun frame->is_local_dest = true;
428*4882a593Smuzhiyun } else {
429*4882a593Smuzhiyun frame->is_local_dest = false;
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun
/* Set up *frame for a standard (untagged) skb: clear the tagged skb
 * pointers and either mark the sender as a SAN (slave ingress) or assign
 * the next master-port sequence number (local egress).
 */
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		/* Untagged frame from a slave port: sender is a Singly
		 * Attached Node.
		 */
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node; seqnr_lock serializes
		 * counter updates from concurrent transmit paths.
		 */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}
454*4882a593Smuzhiyun
/* Classify an skb for HSR: frames carrying an HSR/PRP ethertype are treated
 * as HSR-tagged (data or supervision), everything else as a standard frame.
 * Returns 0 on success or -EINVAL for a truncated HSR header.
 */
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	if (proto == htons(ETH_P_PRP) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}
477*4882a593Smuzhiyun
/* Classify an skb for PRP: frames with a valid Redundancy Control Trailer
 * and a consistent LSDU size are treated as PRP-tagged, everything else is
 * handled as a standard frame. Always returns 0.
 */
int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}
	handle_std_frame(skb, frame);

	return 0;
}
496*4882a593Smuzhiyun
/* Populate *frame from a received skb: supervision status, source node
 * lookup/creation, VLAN detection and protocol-specific classification.
 * Returns 0 on success, negative if the frame is malformed or the source
 * node cannot be resolved.
 */
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		/* Look past the VLAN tag for the encapsulated protocol */
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	/* hsr_fill_frame_info() or prp_fill_frame_info(), per protocol */
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}
542*4882a593Smuzhiyun
/* Must be called holding rcu read lock (because of the port parameter).
 * Entry point for forwarding: classifies the skb, registers the source
 * node's sequence number, forwards clones to all eligible ports and then
 * releases the original skb(s).
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* hsr_forward_do() worked on clones; free the received skb (stored
	 * in one of the three slots) and any lazily created copy. kfree_skb()
	 * accepts NULL.
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

	/* Malformed frame or unknown source node: count and drop */
out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}
570