// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

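/* Software copy of a CFP rule, kept on priv->cfp.rules_list so that rules
 * can be checked for duplicates, returned to ethtool, and re-programmed
 * into the hardware after a resume.
 */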
struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

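/* Each UDF (User Defined Field) extracts one 16-bit word from the packet:
 * CFG_UDF_EOL2 | n selects the word n * 2 bytes past the end of the L2
 * header, and CFG_UDF_EOL3 | n the word n * 2 bytes past the end of the
 * L3 header, as the byte offsets noted on every entry below spell out.
 */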
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

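/* The nine per-slice UDF valid bits are split across two register fields:
 * UDF_Valid[7:0] sits next to the S-Tag/C-Tag words, while UDF_Valid[8]
 * lives in the framing/protocol word, hence the two helpers below. With
 * num_udf = 9, GENMASK(8, 0) = 0x1ff, so udf_lower_bits() returns 0xff
 * and udf_upper_bits() returns 0x1ff >> 8 = 0x1.
 */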
static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}

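/* Return the index of the first slice at or after @start that has any UDFs
 * programmed, or UDF_NUM_SLICES if the layout has no further slices.
 */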
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

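/* Kick off a CFP RAM operation (e.g. OP_SEL_WRITE | TCAM_SEL) and poll for
 * completion: OP_STR_DONE is set by software and cleared by the hardware
 * once the access has finished.
 */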
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

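/* Program the Action and Policer RAM entries backing a TCAM rule: redirect
 * matching packets to @port_num/@queue_num through the in-band destination
 * map, stamp the rule index as classification ID, and leave the rate meter
 * disabled.
 */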
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace the ARL-derived destination with a DST_MAP-derived one,
	 * and define which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

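/* A CFP rule key is written 32 bits at a time through CORE_CFP_DATA_PORT(0)
 * to CORE_CFP_DATA_PORT(7), with a bit-for-bit mask written through the
 * matching CORE_CFP_MASK_PORT() registers; the comments ahead of each write
 * document how the fields straddle those words.
 */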
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	/* The flow rule translation is only needed at programming time */
	ethtool_rx_flow_rule_destroy(flow);

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}

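/* An IPv6 flow does not fit in a single slice: the eight 16-bit address
 * words plus one L4 port fill all nine UDFs of a slice, so a full
 * TCPv6/UDPv6 match takes two chained TCAM entries (source half, then
 * destination half).
 */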
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	return NULL;
}

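/* Return 0 when @fs duplicates a rule already installed on @port, non-zero
 * otherwise, so that duplicate inserts can be refused with -EEXIST.
 */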
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes: one for the second half, which we are
	 * chained from and which is what we will return to user-space, and
	 * a second one used to store its first half. That first half does
	 * not allow any choice of placement, so it just takes the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half
	 * is chained from its first half, we can easily identify IPv6 CFP
	 * rules by checking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search goes by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can
	 * immediately obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       0, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, 0, SLICE_NUM_MASK,
			       0, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	/* The flow rule translation is only needed at programming time */
	ethtool_rx_flow_rule_destroy(flow);

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}

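/* ethtool encodes the destination in ring_cookie as
 * port * SF2_NUM_EGRESS_QUEUES + queue; with the usual eight egress queues,
 * a ring_cookie of 13 would target port 1, queue 5. Decode it here and
 * sanity check the port before programming the rule.
 */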
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to work.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets; check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch.
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule is matching a particular VLAN, make sure that we honor
	 * the matching and have it tagged or untagged on the destination
	 * port; we do this on egress with a VLAN entry. The egress tagging
	 * attribute is expected to be provided in h_ext.data[1] bit 0. A 1
	 * means untagged, a 0 means tagged.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid_begin = vid;
		vlan.vid_end = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
		if (ret)
			return ret;

		ds->ops->port_vlan_add(ds, port_num, &vlan);
	}

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
	      ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

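/* Remove the rule at @loc and, when the entry read back carries a CHAIN_ID,
 * also remove the companion half of a chained IPv6 rule.
 */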
static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique, since
	 * that could leave an IPv6 rule with one half of its chained pair
	 * still in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

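/* Rules are returned to ethtool with every mask bit flipped relative to
 * the copy kept on the rules list, so invert m_u and the extension fields
 * wholesale before handing a rule back.
 */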
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Dump all rules by walking the bitmap of unique (user-visible) rules */
bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv * priv,int port,struct ethtool_rxnfc * nfc,u32 * rule_locs)1088*4882a593Smuzhiyun static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
1089*4882a593Smuzhiyun int port, struct ethtool_rxnfc *nfc,
1090*4882a593Smuzhiyun u32 *rule_locs)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun unsigned int index = 1, rules_cnt = 0;
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
1095*4882a593Smuzhiyun rule_locs[rules_cnt] = index;
1096*4882a593Smuzhiyun rules_cnt++;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun /* Put the TCAM size here */
1100*4882a593Smuzhiyun nfc->data = bcm_sf2_cfp_rule_size(priv);
1101*4882a593Smuzhiyun nfc->rule_cnt = rules_cnt;
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun return 0;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun
bcm_sf2_get_rxnfc(struct dsa_switch * ds,int port,struct ethtool_rxnfc * nfc,u32 * rule_locs)1106*4882a593Smuzhiyun int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
1107*4882a593Smuzhiyun struct ethtool_rxnfc *nfc, u32 *rule_locs)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
1110*4882a593Smuzhiyun struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1111*4882a593Smuzhiyun int ret = 0;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun mutex_lock(&priv->cfp.lock);
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun switch (nfc->cmd) {
1116*4882a593Smuzhiyun case ETHTOOL_GRXCLSRLCNT:
1117*4882a593Smuzhiyun /* Subtract the default, unusable rule */
1118*4882a593Smuzhiyun nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
1119*4882a593Smuzhiyun priv->num_cfp_rules) - 1;
1120*4882a593Smuzhiyun /* We support specifying rule locations */
1121*4882a593Smuzhiyun nfc->data |= RX_CLS_LOC_SPECIAL;
1122*4882a593Smuzhiyun break;
1123*4882a593Smuzhiyun case ETHTOOL_GRXCLSRULE:
1124*4882a593Smuzhiyun ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
1125*4882a593Smuzhiyun break;
1126*4882a593Smuzhiyun case ETHTOOL_GRXCLSRLALL:
1127*4882a593Smuzhiyun ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
1128*4882a593Smuzhiyun break;
1129*4882a593Smuzhiyun default:
1130*4882a593Smuzhiyun ret = -EOPNOTSUPP;
1131*4882a593Smuzhiyun break;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun mutex_unlock(&priv->cfp.lock);
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun if (ret)
1137*4882a593Smuzhiyun return ret;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun /* Pass up the commands to the attached master network device */
1140*4882a593Smuzhiyun if (p->ethtool_ops->get_rxnfc) {
1141*4882a593Smuzhiyun ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
1142*4882a593Smuzhiyun if (ret == -EOPNOTSUPP)
1143*4882a593Smuzhiyun ret = 0;
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun return ret;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

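/* Issue a self-clearing TCAM reset through CORE_CFP_ACC and poll, with a
 * bounded iteration budget, until the hardware deasserts TCAM_RESET.
 */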
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			return 0;

		cpu_relax();
	} while (timeout--);

	/* Checking 'timeout' here would be wrong: the post-decrement wraps
	 * the unsigned counter to UINT_MAX on exhaustion, and a success on
	 * the final iteration would leave it at zero. Reaching this point
	 * can only mean the reset never self-cleared.
	 */
	return -ETIMEDOUT;
}

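/* Delete every software-tracked CFP rule on teardown, using the
 * deletion-safe reverse iterator since each call is expected to unlink
 * the list entry it is visiting.
 */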
void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

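/* Restore CFP state after a resume: hardware contents cannot be trusted,
 * so disable all port classifiers, reset the TCAM, and replay each
 * software-tracked rule back into hardware.
 */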
int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

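/* Per-rule policer counters exposed via ethtool -S: one counter RAM and
 * one readout register per packet color.
 */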
static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

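/* Emit one "CFP<idx>_<color>Cntr" string per rule/counter pair; rule 0 is
 * the default, unusable rule and is skipped.
 */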
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}

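/* Read back the color counter RAMs for every rule programmed on this
 * port; indices without a matching rule, or whose RAM read fails, are
 * skipped.
 */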
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

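/* Rule 0 is excluded from the count, matching the string and stats
 * layout above.
 */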
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}