/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 */

#ifndef FLOW_TABLE_H
#define FLOW_TABLE_H 1

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/openvswitch.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>

#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>

#include "flow.h"

/* Per-CPU cache entry: remembers which mask array index last matched a
 * given skb hash, so later lookups can try that mask first.
 */
struct mask_cache_entry {
	u32 skb_hash;
	u32 mask_index;
};

struct mask_cache {
	struct rcu_head rcu;
	u32 cache_size;  /* Must be a power of 2. */
	struct mask_cache_entry __percpu *mask_cache;
};

/* Snapshot of one mask's usage counter, used to sort masks by hit count
 * when the mask array is rebalanced.
 */
struct mask_count {
	int index;
	u64 counter;
};

/* Per-CPU hit counters, one per mask in the mask array. */
struct mask_array_stats {
	struct u64_stats_sync syncp;
	u64 usage_cntrs[];
};

/* RCU-protected, resizable array of flow masks: 'count' of the 'max'
 * allocated slots are in use.
 */
struct mask_array {
	struct rcu_head rcu;
	int count, max;
	struct mask_array_stats __percpu *masks_usage_stats;
	u64 *masks_usage_zero_cntr;
	struct sw_flow_mask __rcu *masks[];
};

/* One hash table instance; the flow table keeps two of them, one keyed by
 * masked flow key and one keyed by unique flow identifier (UFID).
 */
struct table_instance {
	struct hlist_head *buckets;
	unsigned int n_buckets;
	struct rcu_head rcu;
	int node_ver;
	u32 hash_seed;
};

struct flow_table {
	struct table_instance __rcu *ti;
	struct table_instance __rcu *ufid_ti;
	struct mask_cache __rcu *mask_cache;
	struct mask_array __rcu *mask_array;
	unsigned long last_rehash;
	unsigned int count;
	unsigned int ufid_count;
};

extern struct kmem_cache *flow_stats_cache;

int ovs_flow_init(void);
void ovs_flow_exit(void);

struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_free(struct sw_flow *, bool deferred);

int ovs_flow_tbl_init(struct flow_table *);
int ovs_flow_tbl_count(const struct flow_table *table);
void ovs_flow_tbl_destroy(struct flow_table *table);
int ovs_flow_tbl_flush(struct flow_table *flow_table);

int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int  ovs_flow_tbl_num_masks(const struct flow_table *table);
u32  ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
int  ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
				       u32 *bucket, u32 *idx);
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
					  const struct sw_flow_key *,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
				    const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match);
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
					 const struct sw_flow_id *);

bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask);

void ovs_flow_masks_rebalance(struct flow_table *table);
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti);

#endif /* flow_table.h */