// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
						 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

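/* Encode a TC MPLS push as an NFP push-MPLS action, building the 32-bit
 * label stack entry (label/TC/BOS/TTL) expected by the firmware.
 */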
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
		 const struct flow_action_entry *act,
		 struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_push_mpls);
	u32 mpls_lse = 0;

	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* BOS is optional in the TC action but required for offload. */
	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
		return -EOPNOTSUPP;
	}

	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

	/* Proto, label and TTL are enforced and verified for MPLS push. */
	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
	push_mpls->ethtype = act->mpls_push.proto;
	push_mpls->lse = cpu_to_be32(mpls_lse);

	return 0;
}

static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_pop_mpls);

	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_mpls->ethtype = act->mpls_pop.proto;
}

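/* Encode a TC MPLS mangle as a masked set-MPLS action; only fields
 * explicitly set by TC contribute to the LSE value and its mask.
 */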
static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_set_mpls);
	u32 mpls_lse = 0, mpls_mask = 0;

	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
		mpls_mask |= MPLS_LS_LABEL_MASK;
	}
	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
		mpls_mask |= MPLS_LS_TC_MASK;
	}
	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
		mpls_mask |= MPLS_LS_S_MASK;
	}
	if (act->mpls_mangle.ttl) {
		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
		mpls_mask |= MPLS_LS_TTL_MASK;
	}

	set_mpls->lse = cpu_to_be32(mpls_lse);
	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

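/* Prepend a pre-LAG action when the egress device is a bond. Returns the
 * number of bytes added, 0 if out_dev is not a LAG master, or -err.
 */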
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len,
	       struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
		return -EOPNOTSUPP;
	}

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

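/* Fill an output action for a mirred/redirect entry, encoding the egress
 * port for tunnel, LAG, internal-port and repr netdevs.
 */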
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
	      bool pkt_host, struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
		return -EOPNOTSUPP;
	}

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
			return -EOPNOTSUPP;
		}

		if (*tun_out_cnt) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
			return -EOPNOTSUPP;
		}
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
			return gid;
		}
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
			return -EOPNOTSUPP;
		}

		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
			return -EOPNOTSUPP;
		}

		nfp_flow->pre_tun_rule.dev = out_dev;

		return 0;
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
				return -EOPNOTSUPP;
			}
		}

		if (!nfp_netdev_is_nfp_repr(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
			return -EOPNOTSUPP;
		}

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
			return -EOPNOTSUPP;
		}
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

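/* GRE encap carries no L4 header, so the tunnel type can only be inferred
 * from the egress netdev of the mirred action that follows the encap.
 */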
static bool
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
{
	struct flow_action_entry *act = flow->rule->action.entries;
	int num_act = flow->rule->action.num_entries;
	int act_idx;

	/* Preparse action list for next mirred or redirect action */
	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
		    act[act_idx].id == FLOW_ACTION_MIRRED)
			return netif_is_gretap(act[act_idx].dev);

	return false;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
			struct flow_cls_offload *flow,
			const struct flow_action_entry *act, int act_idx)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	/* Determine the tunnel type based on the egress netdev
	 * in the mirred action for tunnels without l4.
	 */
	if (nfp_flower_tun_is_gre(flow, act_idx))
		return NFP_FL_TUNNEL_GRE;

	switch (tun->key.tp_dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(GENEVE_UDP_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		fallthrough;
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

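/* Prepend a zeroed pre-tunnel action, moving any existing actions along
 * so that it remains first in the action list.
 */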
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

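/* Validate geneve options against firmware limits, then copy them into
 * the action list in the reverse order the hardware expects.
 */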
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
			return -EOPNOTSUPP;
		}

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
			return -EOPNOTSUPP;
		}

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
		return -EOPNOTSUPP;
	}

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

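/* Build the set-tunnel action for a tunnel encap, falling back to a route
 * lookup for the TTL when TC does not provide one, and complete the
 * pre-tunnel action with the tunnel destination address.
 */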
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
	       const struct flow_action_entry *act,
	       struct nfp_fl_pre_tunnel *pre_tun,
	       enum nfp_flower_tun_type tun_type,
	       struct net_device *netdev, struct netlink_ext_ack *extack)
{
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
	size_t act_size = sizeof(struct nfp_fl_set_tun);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
		return -EOPNOTSUPP;

	if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
		return -EOPNOTSUPP;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
		return -EOPNOTSUPP;
	}

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
#ifdef CONFIG_IPV6
	} else if (ipv6) {
		struct net *net = dev_net(netdev);
		struct flowi6 flow = {};
		struct dst_entry *dst;

		flow.daddr = ip_tun->key.u.ipv6.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
		if (!IS_ERR(dst)) {
			set_tun->ttl = ip6_dst_hoplimit(dst);
			dst_release(dst);
		} else {
			set_tun->ttl = net->ipv6.devconf_all->hop_limit;
		}
#endif
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
		return -EOPNOTSUPP;
	}
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	if (ipv6) {
		pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
		pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
	} else {
		pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
	}

	return 0;
}

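/* Merge a 32-bit masked write into the accumulated value/mask pair of a
 * pedit action, preserving bits set by earlier mangle entries.
 */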
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

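/* Convert one 32-bit IPv4 pedit word, keyed by header offset, into the
 * matching set-address or set-ttl/tos action; writes that touch fields
 * the firmware cannot update are rejected.
 */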
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
	       struct netlink_ext_ack *extack)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
		return -EOPNOTSUPP;
	}

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
		return -EOPNOTSUPP;
	}

	return 0;
}

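/* Accumulate one 32-bit word of an IPv6 address rewrite into a set-IPv6
 * address action.
 */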
static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
	__be16	payload_len;
	u8	nexthdr;
	u8	hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
				    struct netlink_ext_ack *extack)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

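/* Dispatch a 32-bit IPv6 pedit word to the hop-limit/flow-label, source
 * address or destination address action based on its header offset.
 */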
static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
	       struct netlink_ext_ack *extack)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
		return -EOPNOTSUPP;
	}

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl, extack);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
		return -EOPNOTSUPP;
	}

	return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode,
		 struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

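/* Map the flow's IP protocol to the TC checksum flags that hardware
 * refreshes implicitly when it rewrites the corresponding headers.
 */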
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

struct nfp_flower_pedit_acts {
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
};

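/* Copy each populated pedit sub-action into the action list in the order
 * hardware expects, recording which checksums the firmware will fix.
 */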
static int
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
		     int *a_len, struct nfp_flower_pedit_acts *set_act,
		     u32 *csum_updated)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	size_t act_size = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_act->set_eth.head.len_lw) {
		act_size = sizeof(set_act->set_eth);
		memcpy(nfp_action, &set_act->set_eth, act_size);
		*a_len += act_size;
	}

	if (set_act->set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_ttl_tos);
		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_addr);
		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_dst.head.len_lw &&
	    set_act->set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
		       &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_act->set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_tport);
		memcpy(nfp_action, &set_act->set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

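/* Route a single TC mangle entry to the handler for its header type,
 * accumulating the result in the pending pedit actions.
 */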
static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     struct flow_cls_offload *flow, char *nfp_action, int *a_len,
	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
	     struct netlink_ext_ack *extack)
{
	enum flow_action_mangle_base htype;
	u32 offset;

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
				      &set_act->set_ip_ttl_tos, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
				      &set_act->set_ip6_src,
				      &set_act->set_ip6_tc_hl_fl, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_TCP, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_UDP, extack);
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
		return -EOPNOTSUPP;
	}
}

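/* Append an output action for a mirred entry, enforcing checksum and
 * action list size constraints and adding a pre-LAG action when
 * egressing to a bond.
 */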
static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated, bool pkt_host,
			 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
		return -EOPNOTSUPP;
	}

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt, pkt_host, extack);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
		if (prelag_size < 0) {
			return prelag_size;
		} else if (prelag_size > 0 && (!last || *out_cnt)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
			return -EOPNOTSUPP;
		}

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

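/* Translate one TC action into its NFP equivalent and append it to the
 * flow's action list, tracking tunnel, output and checksum state.
 */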
978*4882a593Smuzhiyun static int
nfp_flower_loop_action(struct nfp_app * app,const struct flow_action_entry * act,struct flow_cls_offload * flow,struct nfp_fl_payload * nfp_fl,int * a_len,struct net_device * netdev,enum nfp_flower_tun_type * tun_type,int * tun_out_cnt,int * out_cnt,u32 * csum_updated,struct nfp_flower_pedit_acts * set_act,bool * pkt_host,struct netlink_ext_ack * extack,int act_idx)979*4882a593Smuzhiyun nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
980*4882a593Smuzhiyun struct flow_cls_offload *flow,
981*4882a593Smuzhiyun struct nfp_fl_payload *nfp_fl, int *a_len,
982*4882a593Smuzhiyun struct net_device *netdev,
983*4882a593Smuzhiyun enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
984*4882a593Smuzhiyun int *out_cnt, u32 *csum_updated,
985*4882a593Smuzhiyun struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
986*4882a593Smuzhiyun struct netlink_ext_ack *extack, int act_idx)
987*4882a593Smuzhiyun {
988*4882a593Smuzhiyun struct nfp_fl_pre_tunnel *pre_tun;
989*4882a593Smuzhiyun struct nfp_fl_set_tun *set_tun;
990*4882a593Smuzhiyun struct nfp_fl_push_vlan *psh_v;
991*4882a593Smuzhiyun struct nfp_fl_push_mpls *psh_m;
992*4882a593Smuzhiyun struct nfp_fl_pop_vlan *pop_v;
993*4882a593Smuzhiyun struct nfp_fl_pop_mpls *pop_m;
994*4882a593Smuzhiyun struct nfp_fl_set_mpls *set_m;
995*4882a593Smuzhiyun int err;
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun switch (act->id) {
998*4882a593Smuzhiyun case FLOW_ACTION_DROP:
999*4882a593Smuzhiyun nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
1000*4882a593Smuzhiyun break;
1001*4882a593Smuzhiyun case FLOW_ACTION_REDIRECT_INGRESS:
1002*4882a593Smuzhiyun case FLOW_ACTION_REDIRECT:
1003*4882a593Smuzhiyun err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
1004*4882a593Smuzhiyun true, tun_type, tun_out_cnt,
1005*4882a593Smuzhiyun out_cnt, csum_updated, *pkt_host,
1006*4882a593Smuzhiyun extack);
1007*4882a593Smuzhiyun if (err)
1008*4882a593Smuzhiyun return err;
1009*4882a593Smuzhiyun break;
1010*4882a593Smuzhiyun case FLOW_ACTION_MIRRED_INGRESS:
1011*4882a593Smuzhiyun case FLOW_ACTION_MIRRED:
1012*4882a593Smuzhiyun err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
1013*4882a593Smuzhiyun false, tun_type, tun_out_cnt,
1014*4882a593Smuzhiyun out_cnt, csum_updated, *pkt_host,
1015*4882a593Smuzhiyun extack);
1016*4882a593Smuzhiyun if (err)
1017*4882a593Smuzhiyun return err;
1018*4882a593Smuzhiyun break;
1019*4882a593Smuzhiyun case FLOW_ACTION_VLAN_POP:
1020*4882a593Smuzhiyun if (*a_len +
1021*4882a593Smuzhiyun sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
1022*4882a593Smuzhiyun NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
1023*4882a593Smuzhiyun return -EOPNOTSUPP;
1024*4882a593Smuzhiyun }
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
1027*4882a593Smuzhiyun nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun nfp_fl_pop_vlan(pop_v);
1030*4882a593Smuzhiyun *a_len += sizeof(struct nfp_fl_pop_vlan);
1031*4882a593Smuzhiyun break;
1032*4882a593Smuzhiyun case FLOW_ACTION_VLAN_PUSH:
1033*4882a593Smuzhiyun if (*a_len +
1034*4882a593Smuzhiyun sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
1035*4882a593Smuzhiyun NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
1036*4882a593Smuzhiyun return -EOPNOTSUPP;
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
1040*4882a593Smuzhiyun nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun nfp_fl_push_vlan(psh_v, act);
1043*4882a593Smuzhiyun *a_len += sizeof(struct nfp_fl_push_vlan);
1044*4882a593Smuzhiyun break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
		if (*tun_type == NFP_FL_TUNNEL_NONE) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
			return -EOPNOTSUPP;
		}

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
			return -EOPNOTSUPP;
		}

		/* The pre-tunnel action is required for tunnel encap.
		 * It checks for a next hop entry on the NFP; if none is
		 * found, the packet falls back to the kernel before any
		 * other actions are applied.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
			return -EOPNOTSUPP;
		}

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
				     netdev, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_tun);
	}
		break;
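		/* The encap case above lays out up to three contiguous
		 * actions: the pre-tunnel next-hop check, optional Geneve
		 * options and the tunnel header itself, advancing *a_len
		 * past each in turn.
		 */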
	case FLOW_ACTION_TUNNEL_DECAP:
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	case FLOW_ACTION_MANGLE:
		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated, set_act, extack))
			return -EOPNOTSUPP;
		break;
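		/* Consecutive mangle (pedit) actions are accumulated in
		 * set_act and only flushed into the action list by
		 * nfp_fl_commit_mangle() once the run of mangles ends; see
		 * nfp_fl_check_mangle_start()/_end() below.
		 */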
	case FLOW_ACTION_CSUM:
		/* The csum action requests recalculation of a checksum we
		 * have not modified; that cannot be offloaded.
		 */
		if (act->csum_flags & ~*csum_updated) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
			return -EOPNOTSUPP;
		}
		/* Since we will correctly fix this csum, remove it from the
		 * list of outstanding csum updates, which is later used to
		 * verify support.
		 */
		*csum_updated &= ~act->csum_flags;
		break;
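		/* For example (flag name from the tc_csum uapi): a pedit
		 * that rewrites an IPv4 header records
		 * TCA_CSUM_UPDATE_FLAG_IPV4HDR in *csum_updated, so a
		 * following csum action requesting only that recalculation
		 * passes the check above.
		 */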
	case FLOW_ACTION_MPLS_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
			return -EOPNOTSUPP;
		}

		psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		err = nfp_fl_push_mpls(psh_m, act, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_push_mpls);
		break;
	case FLOW_ACTION_MPLS_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
			return -EOPNOTSUPP;
		}

		pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_pop_mpls(pop_m, act);
		*a_len += sizeof(struct nfp_fl_pop_mpls);
		break;
	case FLOW_ACTION_MPLS_MANGLE:
		if (*a_len +
		    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
			return -EOPNOTSUPP;
		}

		set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_set_mpls(set_m, act);
		*a_len += sizeof(struct nfp_fl_set_mpls);
		break;
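		/* Illustrative usage for the MPLS cases above (the interface
		 * name is hypothetical):
		 *
		 *   tc filter add dev nfp_p0 ingress protocol ip flower \
		 *       action mpls push protocol mpls_uc \
		 *       label 123 tc 3 ttl 64 bos 1
		 *
		 * Note that BOS must be given explicitly for the push to be
		 * offloaded (see nfp_fl_push_mpls()).
		 */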
	case FLOW_ACTION_PTYPE:
		/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
		if (act->ptype != PACKET_HOST)
			return -EOPNOTSUPP;

		*pkt_host = true;
		break;
	default:
		/* Currently we do not handle any other actions. */
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
		return -EOPNOTSUPP;
	}

	return 0;
}
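
/* Illustrative rule exercising the FLOW_ACTION_PTYPE path above
 * (interface names are hypothetical):
 *
 *   tc filter add dev vxlan0 ingress flower \
 *       action skbedit ptype host pipe \
 *       action mirred ingress redirect dev nfp_p0
 *
 * The skbedit sets *pkt_host, which the subsequent ingress redirect
 * consumes via nfp_flower_output_action().
 */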

static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
				      int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry prev_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == 0)
		return true;

	prev_act = flow_act->entries[current_act_idx - 1];

	return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
				    int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry next_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	/* The last valid index is num_entries - 1; checking against it here
	 * also avoids reading one entry past the end of the array below.
	 */
	if (current_act_idx == flow_act->num_entries - 1)
		return true;

	next_act = flow_act->entries[current_act_idx + 1];

	return next_act.id != FLOW_ACTION_MANGLE;
}
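
/* Worked example of the two helpers above: for an action list with ids
 * [mangle, mangle, csum, mangle], nfp_fl_check_mangle_start() returns
 * true at indices 0 and 3 and nfp_fl_check_mangle_end() returns true at
 * indices 1 and 3, so the two runs of pedit actions are committed as two
 * separate NFP set actions.
 */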

int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	struct nfp_flower_pedit_acts set_act;
	enum nfp_flower_tun_type tun_type;
	struct flow_action_entry *act;
	bool pkt_host = false;
	u32 csum_updated = 0;

	if (!flow_action_hw_stats_check(&flow->rule->action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;
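	/* The NFP reports flow statistics periodically rather than on
	 * demand, so only the "delayed" HW stats type can be honoured;
	 * rules requiring immediate stats are rejected above.
	 */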

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
			memset(&set_act, 0, sizeof(set_act));
		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated,
					     &set_act, &pkt_host, extack, i);
		if (err)
			return err;
		act_cnt++;
		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
			nfp_fl_commit_mangle(flow,
					     &nfp_flow->action_data[act_len],
					     &act_len, &set_act, &csum_updated);
	}

	/* Shortcuts can only be used when the action list contains a single
	 * action; once there is more than one action the shortcut must be
	 * cleared so the firmware parses the full list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}
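
/* Sketch of where this fits (an assumption based on caller names used
 * elsewhere in this driver): the flower offload path builds the match
 * key, then calls nfp_flower_compile_action() to translate the TC action
 * list into the firmware encoding before the flow is sent to the NFP:
 *
 *	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
 *	if (err)
 *		goto err_destroy_flow;
 */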