1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright 2020, NXP Semiconductors
3*4882a593Smuzhiyun */
4*4882a593Smuzhiyun #include "sja1105.h"
5*4882a593Smuzhiyun #include "sja1105_vl.h"
6*4882a593Smuzhiyun
sja1105_rule_find(struct sja1105_private * priv,unsigned long cookie)7*4882a593Smuzhiyun struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
8*4882a593Smuzhiyun unsigned long cookie)
9*4882a593Smuzhiyun {
10*4882a593Smuzhiyun struct sja1105_rule *rule;
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun list_for_each_entry(rule, &priv->flow_block.rules, list)
13*4882a593Smuzhiyun if (rule->cookie == cookie)
14*4882a593Smuzhiyun return rule;
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun return NULL;
17*4882a593Smuzhiyun }
18*4882a593Smuzhiyun
sja1105_find_free_l2_policer(struct sja1105_private * priv)19*4882a593Smuzhiyun static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun int i;
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
24*4882a593Smuzhiyun if (!priv->flow_block.l2_policer_used[i])
25*4882a593Smuzhiyun return i;
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun return -1;
28*4882a593Smuzhiyun }
29*4882a593Smuzhiyun
/* Install (or attach a port to) a broadcast policer.
 *
 * All ports sharing the same tc cookie point their per-port broadcast
 * policing entry at one shared L2 policer, whose rate/burst are then
 * (re)programmed and committed via a static config reload.
 *
 * On success for a newly created rule, the chosen policer index is
 * marked used and the rule is linked into the flow block; on any
 * failure a newly allocated rule is freed again.
 */
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		/* First port to use this cookie: allocate the rule and
		 * reserve a free policer index. A full table yields -1
		 * here, which is rejected just below.
		 */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* The per-port broadcast entries live after the (port, tc)
	 * entries. A sharindx still equal to @port means this port's
	 * broadcast policer has not been claimed by any rule yet.
	 */
	if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

	/* Convert bytes/s into hardware rate units (x * 512 / 1e6). */
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit a new rule only on success, otherwise undo the
	 * allocation. NOTE(review): if the reload fails for an already
	 * existing rule, the sharindx/port_mask edits above are not
	 * rolled back — presumably tolerated; confirm with callers.
	 */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
100*4882a593Smuzhiyun
/* Install (or attach a port to) a per-traffic-class policer keyed on
 * VLAN PCP @tc.
 *
 * Mirrors sja1105_setup_bcast_policer(): all ports sharing the cookie
 * point their (port, tc) policing entry at one shared L2 policer, whose
 * rate/burst are then programmed and committed via a config reload.
 * A newly created rule is committed only on success, freed on failure.
 */
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		/* First port to use this cookie: allocate the rule and
		 * reserve a free policer index (-1 if the table is
		 * full, rejected just below).
		 */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* A (port, tc) entry whose sharindx still equals @port has not
	 * been claimed by any rule yet.
	 */
	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	/* Convert bytes/s into hardware rate units (x * 512 / 1e6). */
	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	/* Commit a new rule only on success, otherwise undo the
	 * allocation. NOTE(review): a reload failure for an existing
	 * rule leaves the sharindx/port_mask edits above in place —
	 * presumably tolerated; confirm with callers.
	 */
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}
172*4882a593Smuzhiyun
/* Dispatch a tc police action to the policer implementation matching
 * the parsed key: broadcast policer for SJA1105_KEY_BCAST, per-traffic-
 * class policer for SJA1105_KEY_TC. Any other key type is rejected with
 * -EOPNOTSUPP.
 */
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  u32 burst)
{
	if (key->type == SJA1105_KEY_BCAST)
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);

	if (key->type == SJA1105_KEY_TC)
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);

	NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
	return -EOPNOTSUPP;
}
193*4882a593Smuzhiyun
/* Translate a flower classifier match into an sja1105_key.
 *
 * Only exact (full-mask) matches on DMAC, VLAN ID and VLAN PCP are
 * supported. The combination of matched fields selects the key type:
 *   - broadcast DMAC, no VID/PCP   -> SJA1105_KEY_BCAST
 *   - PCP only                     -> SJA1105_KEY_TC
 *   - DMAC + VID + PCP             -> SJA1105_KEY_VLAN_AWARE_VL
 *   - DMAC otherwise               -> SJA1105_KEY_VLAN_UNAWARE_VL
 *
 * Returns 0 and fills in @key, or -EOPNOTSUPP for unsupported matches.
 */
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	/* U64_MAX / U16_MAX act as "field not matched" sentinels. */
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	/* Reject any dissector key beyond the four we understand. */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	/* Matching on EtherType (n_proto) is not offloadable. */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* Any bit of the SMAC under the mask must be zero —
		 * i.e. no effective source MAC match requested.
		 */
		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		/* Only an all-ones DMAC mask (exact match) is allowed. */
		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		/* VID mask must be absent or full (exact match). */
		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		/* PCP mask must be absent or full (all 3 bits). */
		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

	/* Classification below is order-sensitive: most specific key
	 * types are recognized first.
	 */
	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	/* NOTE(review): a DMAC match combined with only one of VID/PCP
	 * falls through to here and the VID/PCP is silently ignored —
	 * verify this is intended rather than returning -EOPNOTSUPP.
	 */
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}
301*4882a593Smuzhiyun
/* DSA .cls_flower_add hook: offload a flower rule on @port.
 *
 * The match is first parsed into an sja1105_key, then each action is
 * offloaded in turn: police -> L2 policers; trap/redirect/drop ->
 * virtual-link (VL) rules; gate -> time-gated VL rules. VL rules defer
 * the static config reload until all actions have populated DESTPORTS.
 *
 * Returns 0 on success or a negative errno with an extack message.
 */
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	/* Default result if the action list is empty. */
	rc = -EOPNOTSUPP;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			/* Trap == redirect towards the upstream (CPU) port. */
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			/* Only redirects towards another port of this
			 * switch tree can be offloaded.
			 */
			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			vl_rule = true;

			/* Drop == redirect towards an empty port mask. */
			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->gate.index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			/* A gate needs a destination, which only a
			 * redirect or trap action provides.
			 */
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		/* Commit all accumulated VL changes in one reload. */
		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}
415*4882a593Smuzhiyun
/* DSA .cls_flower_del hook: tear down the rule identified by the tc
 * cookie on @port.
 *
 * VL rules are delegated to sja1105_vl_delete(). For policer rules the
 * port's policing entry is pointed back at its own default policer; the
 * shared policer index and the rule itself are released once the last
 * port detaches. The change is committed via a static config reload.
 */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	switch (rule->type) {
	case SJA1105_RULE_BCAST_POLICER: {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
		break;
	}
	case SJA1105_RULE_TC_POLICER: {
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
		break;
	}
	default:
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		/* Last user gone: release the shared policer and the rule. */
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
455*4882a593Smuzhiyun
/* DSA .cls_flower_stats hook: report counters for the rule identified
 * by the tc cookie. Only virtual-link rules have stats to report; for
 * anything else (including an unknown cookie), succeed with no stats.
 */
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);

	if (!rule || rule->type != SJA1105_RULE_VL)
		return 0;

	return sja1105_vl_stats(priv, port, rule, &cls->stats,
				cls->common.extack);
}
476*4882a593Smuzhiyun
sja1105_flower_setup(struct dsa_switch * ds)477*4882a593Smuzhiyun void sja1105_flower_setup(struct dsa_switch *ds)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun struct sja1105_private *priv = ds->priv;
480*4882a593Smuzhiyun int port;
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun INIT_LIST_HEAD(&priv->flow_block.rules);
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun for (port = 0; port < SJA1105_NUM_PORTS; port++)
485*4882a593Smuzhiyun priv->flow_block.l2_policer_used[port] = true;
486*4882a593Smuzhiyun }
487*4882a593Smuzhiyun
sja1105_flower_teardown(struct dsa_switch * ds)488*4882a593Smuzhiyun void sja1105_flower_teardown(struct dsa_switch *ds)
489*4882a593Smuzhiyun {
490*4882a593Smuzhiyun struct sja1105_private *priv = ds->priv;
491*4882a593Smuzhiyun struct sja1105_rule *rule;
492*4882a593Smuzhiyun struct list_head *pos, *n;
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun list_for_each_safe(pos, n, &priv->flow_block.rules) {
495*4882a593Smuzhiyun rule = list_entry(pos, struct sja1105_rule, list);
496*4882a593Smuzhiyun list_del(&rule->list);
497*4882a593Smuzhiyun kfree(rule);
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun }
500