/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#ifndef DATAPATH_H
#define DATAPATH_H 1

#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <net/ip_tunnels.h>

#include "conntrack.h"
#include "flow.h"
#include "flow_table.h"
#include "meter.h"
#include "vport-internal_dev.h"

#define DP_MAX_PORTS                USHRT_MAX
#define DP_VPORT_HASH_BUCKETS       1024
#define DP_MASKS_REBALANCE_INTERVAL 4000

/**
 * struct dp_stats_percpu - per-cpu packet processing statistics for a given
 * datapath.
 * @n_hit: Number of received packets for which a matching flow was found in
 * the flow table.
 * @n_missed: Number of received packets that had no matching flow in the flow
 * table.  The sum of @n_hit and @n_missed is the number of packets that have
 * been received by the datapath.
 * @n_lost: Number of received packets that had no matching flow in the flow
 * table and that could not be sent to userspace (normally due to an overflow
 * in one of the datapath's queues).
 * @n_mask_hit: Number of masks looked up for flow match.
 *   @n_mask_hit / (@n_hit + @n_missed) is the average number of masks looked
 *   up per packet.
 * @n_cache_hit: Number of received packets that had their mask found using
 * the mask cache.
 * @syncp: Synchronization point for the 64-bit counters on 32-bit systems.
 */
struct dp_stats_percpu {
	u64 n_hit;
	u64 n_missed;
	u64 n_lost;
	u64 n_mask_hit;
	u64 n_cache_hit;
	struct u64_stats_sync syncp;
};
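
/*
 * The counters are written on the packet fast path and aggregated on demand.
 * A minimal sketch of the usual per-CPU u64_stats read pattern (illustrative
 * only; the local variable names are not from this file, and the _irq fetch
 * helpers are assumed to be available in this kernel):
 *
 *	u64 n_hit = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct dp_stats_percpu *s;
 *		unsigned int start;
 *		u64 hit;
 *
 *		s = per_cpu_ptr(dp->stats_percpu, cpu);
 *		do {
 *			start = u64_stats_fetch_begin_irq(&s->syncp);
 *			hit = s->n_hit;
 *		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
 *		n_hit += hit;
 *	}
 */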

/**
 * struct datapath - datapath for flow-based packet switching
 * @rcu: RCU callback head for deferred destruction.
 * @list_node: Element in global 'dps' list.
 * @table: flow table.
 * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
 * ovs_mutex and RCU.
 * @stats_percpu: Per-CPU datapath statistics.
 * @net: Reference to net namespace.
 * @user_features: Bitmask of userspace-requested datapath features
 * (%OVS_DP_F_*).
 * @max_headroom: The maximum headroom of all vports in this datapath; it will
 * be used by all the internal vports in this dp.
 * @meter_tbl: Meter table for this datapath.
 *
 * Context: See the comment on locking at the top of datapath.c for additional
 * locking information.
 */
struct datapath {
	struct rcu_head rcu;
	struct list_head list_node;

	/* Flow table. */
	struct flow_table table;

	/* Switch ports. */
	struct hlist_head *ports;

	/* Stats. */
	struct dp_stats_percpu __percpu *stats_percpu;

	/* Network namespace ref. */
	possible_net_t net;

	u32 user_features;

	u32 max_headroom;

	/* Switch meters. */
	struct dp_meter_table meter_tbl;
};
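
/*
 * 'ports' is an array of DP_VPORT_HASH_BUCKETS hlist heads keyed by port
 * number.  A sketch of walking every vport under RCU (the loop shape and the
 * dp_hash_node linkage come from the datapath code, not this header, and
 * use_vport() is a placeholder):
 *
 *	int i;
 *
 *	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
 *		struct vport *vport;
 *
 *		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
 *			use_vport(vport);
 *	}
 */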

/**
 * struct ovs_skb_cb - OVS data in skb CB
 * @input_vport: The original vport the packet came in on. This value is
 * cached when a packet is received by OVS.
 * @mru: The maximum received fragment size; 0 if the packet is not
 * fragmented.
 * @acts_origlen: The netlink size of the flow actions applied to this skb.
 * @cutlen: The number of bytes from the packet end to be removed.
 */
struct ovs_skb_cb {
	struct vport		*input_vport;
	u16			mru;
	u16			acts_origlen;
	u32			cutlen;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
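
/*
 * OVS_CB() overlays struct ovs_skb_cb on skb->cb, so the struct must fit in
 * the 48-byte skb control block.  Typical (illustrative) use when a packet
 * enters the datapath:
 *
 *	OVS_CB(skb)->input_vport = vport;
 *	OVS_CB(skb)->mru = 0;
 *	OVS_CB(skb)->cutlen = 0;
 */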

/**
 * struct dp_upcall_info - metadata to include with a packet sent to userspace
 * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
 * @userdata: If nonnull, its variable-length value is passed to userspace as
 * %OVS_PACKET_ATTR_USERDATA.
 * @actions: If nonnull, actions to include with the upcall as
 * %OVS_PACKET_ATTR_ACTIONS.
 * @actions_len: Length of @actions in bytes.
 * @portid: Netlink portid to which packet should be sent.  If @portid is 0
 * then no packet is sent and the packet is accounted in the datapath's @n_lost
 * counter.
 * @cmd: One of %OVS_PACKET_CMD_*.
 * @mru: If not zero, maximum received IP fragment size.
 */
struct dp_upcall_info {
	struct ip_tunnel_info *egress_tun_info;
	const struct nlattr *userdata;
	const struct nlattr *actions;
	int actions_len;
	u32 portid;
	u8 cmd;
	u16 mru;
};
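
/*
 * A sketch of how a flow-miss upcall is typically assembled (based on the
 * shape of the miss path; exact field values depend on the caller):
 *
 *	struct dp_upcall_info upcall;
 *	int error;
 *
 *	memset(&upcall, 0, sizeof(upcall));
 *	upcall.cmd = OVS_PACKET_CMD_MISS;
 *	upcall.portid = ovs_vport_find_upcall_portid(p, skb);
 *	upcall.mru = OVS_CB(skb)->mru;
 *	error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
 */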

/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 * @dp_notify_work: Work item used to handle netdev notifier events for ports.
 * @masks_rebalance: Delayed work that periodically rebalances the flow-mask
 * cache.
 * @ct_limit_info: Connection-tracking limit configuration.
 */
struct ovs_net {
	struct list_head dps;
	struct work_struct dp_notify_work;
	struct delayed_work masks_rebalance;
#if	IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	struct ovs_ct_limit_info *ct_limit_info;
#endif

	/* Module reference for configuring conntrack. */
	bool xt_label;
};
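
/*
 * The per-namespace state is reached through the generic netns pointer
 * registered under ovs_net_id.  A minimal sketch (locking elided; 'dps' is
 * protected as described above, and handle_dp() is a placeholder):
 *
 *	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 *	struct datapath *dp;
 *
 *	list_for_each_entry(dp, &ovs_net->dps, list_node)
 *		handle_dp(dp);
 */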

/**
 * enum ovs_pkt_hash_types - hash info to include with a packet
 * to send to userspace.
 * @OVS_PACKET_HASH_SW_BIT: indicates hash was computed in software stack.
 * @OVS_PACKET_HASH_L4_BIT: indicates hash is a canonical 4-tuple hash
 * over transport ports.
 */
enum ovs_pkt_hash_types {
	OVS_PACKET_HASH_SW_BIT = (1ULL << 32),
	OVS_PACKET_HASH_L4_BIT = (1ULL << 33),
};
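
/*
 * These bits sit above the 32-bit skb hash value so the hash and its
 * provenance can be reported to userspace in a single 64-bit attribute
 * (%OVS_PACKET_ATTR_HASH).  A sketch of the encoding:
 *
 *	u64 hash = skb_get_hash_raw(skb);
 *
 *	if (skb->sw_hash)
 *		hash |= OVS_PACKET_HASH_SW_BIT;
 *	if (skb->l4_hash)
 *		hash |= OVS_PACKET_HASH_L4_BIT;
 */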

extern unsigned int ovs_net_id;
void ovs_lock(void);
void ovs_unlock(void);

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void);
#else
#define lockdep_ovsl_is_held()	1
#endif

#define ASSERT_OVSL()		WARN_ON(!lockdep_ovsl_is_held())
#define ovsl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_ovsl_is_held())
#define rcu_dereference_ovsl(p)					\
	rcu_dereference_check(p, lockdep_ovsl_is_held())
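
/*
 * Illustrative use of the two dereference helpers on an RCU-protected
 * pointer such as a flow's actions (sketch; error handling omitted):
 *
 *	Update path (ovs_lock() held):
 *		old_acts = ovsl_dereference(flow->sf_acts);
 *		rcu_assign_pointer(flow->sf_acts, new_acts);
 *
 *	Read path (rcu_read_lock() or ovs_lock() held):
 *		acts = rcu_dereference_ovsl(flow->sf_acts);
 */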

static inline struct net *ovs_dp_get_net(const struct datapath *dp)
{
	return read_pnet(&dp->net);
}

static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
{
	write_pnet(&dp->net, net);
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);

static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return ovs_lookup_vport(dp, port_no);
}

static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	return ovs_lookup_vport(dp, port_no);
}

static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)
{
	ASSERT_OVSL();
	return ovs_lookup_vport(dp, port_no);
}
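
/*
 * The three wrappers differ only in which lock they assert: ovs_vport_rcu()
 * for pure RCU readers, ovs_vport_ovsl() for code that must hold ovs_mutex,
 * and ovs_vport_ovsl_rcu() for paths that may run under either.  Sketch of
 * an RCU reader:
 *
 *	rcu_read_lock();
 *	vport = ovs_vport_rcu(dp, OVSP_LOCAL);
 *	if (vport)
 *		use_vport(vport);
 *	rcu_read_unlock();
 */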

/* Must be called with rcu_read_lock. */
static inline struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);

		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}
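
/*
 * Typical lookup from a request handler (sketch; dp_ifindex normally comes
 * from the request's struct ovs_header):
 *
 *	rcu_read_lock();
 *	dp = get_dp_rcu(net, dp_ifindex);
 *	if (!dp) {
 *		rcu_read_unlock();
 *		return -ENODEV;
 *	}
 *	...
 *	rcu_read_unlock();
 */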

extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_family dp_vport_genl_family;

DECLARE_STATIC_KEY_FALSE(tc_recirc_sharing_support);

void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
		  const struct sw_flow_key *, const struct dp_upcall_info *,
		  uint32_t cutlen);

const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd);

int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *, struct sw_flow_key *);

void ovs_dp_notify_wq(struct work_struct *work);

int action_fifos_init(void);
void action_fifos_exit(void);

/* 'KEY' must not have any bits set outside of the 'MASK' */
#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))
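
/*
 * Example of a masked write as used by set-masked actions: only the bits
 * selected by the mask come from the new value, the rest keep their old
 * contents (the field names follow struct ovs_key_ipv4 for illustration):
 *
 *	nh->ttl = OVS_MASKED(nh->ttl, key->ipv4_ttl, mask->ipv4_ttl);
 *
 * With OLD = 0x3c, KEY = 0x80 and MASK = 0xc0 this yields
 * (0x80 | (0x3c & ~0xc0)) = 0xbc.
 */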

#define OVS_NLERR(logging_allowed, fmt, ...)			\
do {								\
	if (logging_allowed && net_ratelimit())			\
		pr_info("netlink: " fmt "\n", ##__VA_ARGS__);	\
} while (0)
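
/*
 * OVS_NLERR() is the rate-limited logging helper used when parsing netlink
 * requests; 'logging_allowed' lets callers honour a per-request log flag.
 * Illustrative call (the message text is made up):
 *
 *	OVS_NLERR(log, "Unknown key attribute type %d", type);
 */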
#endif /* datapath.h */