// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static LIST_HEAD(vport_ops_list);

/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
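
/*
 * Illustrative note (sketch, not part of the upstream code): because
 * VPORT_HASH_BUCKETS is a power of two, masking with
 * (VPORT_HASH_BUCKETS - 1) is equivalent to hash % VPORT_HASH_BUCKETS.
 * For example, with hash == 0x9e3779b9:
 *
 *	0x9e3779b9 & (1024 - 1) == 0x1b9 == bucket 441
 *
 * Salting jhash() with the net pointer keeps identically named ports in
 * different network namespaces from always landing in the same bucket.
 */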

int __ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
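
/*
 * Example (sketch): a vport module registers its ops from its module
 * init hook, typically via the ovs_vport_ops_register() wrapper in
 * vport.h, which fills in ->owner before calling the function above.
 * The "foo" names and OVS_VPORT_TYPE_FOO below are hypothetical:
 *
 *	static struct vport_ops vport_foo_ops = {
 *		.type		= OVS_VPORT_TYPE_FOO,
 *		.create		= foo_create,
 *		.destroy	= foo_destroy,
 *		.send		= foo_xmit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return ovs_vport_ops_register(&vport_foo_ops);
 *	}
 */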

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);

/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace
 * @name: name of port to find
 *
 * Must be called with ovs_mutex or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node,
				 lockdep_ovsl_is_held())
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}
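
/*
 * Example (sketch): a reader looks a port up by name under the RCU read
 * lock; the returned vport is only guaranteed to stay valid while the
 * read-side critical section (or ovs_mutex) is held:
 *
 *	rcu_read_lock();
 *	vport = ovs_vport_locate(net, name);
 *	if (vport)
 *		port_no = vport->port_no;
 *	rcu_read_unlock();
 */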

/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about the new vport
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). Vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		kfree(vport);
		return ERR_PTR(-EINVAL);
	}

	return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
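
/*
 * Example (sketch, hypothetical "foo" driver): a caller embeds private
 * state after the vport by passing its size as @priv_size, then
 * recovers it with vport_priv():
 *
 *	struct foo_port {
 *		struct net_device *dev;
 *	};
 *
 *	vport = ovs_vport_alloc(sizeof(struct foo_port),
 *				&vport_foo_ops, parms);
 *	if (IS_ERR(vport))
 *		return vport;
 *	foo_port = vport_priv(vport);
 */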

/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or an error path, therefore
	 * it is safe to use a raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
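
/*
 * Example (sketch): drivers that published the vport in RCU-visible
 * structures defer the free past a grace period instead of calling
 * ovs_vport_free() directly; assuming the rcu_head embedded in
 * struct vport is used for this:
 *
 *	static void free_port_rcu(struct rcu_head *rcu)
 *	{
 *		struct vport *vport = container_of(rcu, struct vport, rcu);
 *
 *		ovs_vport_free(vport);
 *	}
 *
 *	call_rcu(&vport->rcu, free_port_rcu);
 */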

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
	struct vport_ops *ops;

	list_for_each_entry(ops, &vport_ops_list, list)
		if (ops->type == parms->type)
			return ops;

	return NULL;
}

/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
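
/*
 * Example (sketch): because ovs_vport_add() drops ovs_mutex to load a
 * module and then reports -EAGAIN, callers must restart the whole add;
 * the netlink handler in datapath.c follows roughly this shape:
 *
 *	restart:
 *		vport = new_vport(&parms);
 *		err = PTR_ERR(vport);
 *		if (IS_ERR(vport)) {
 *			if (err == -EAGAIN)
 *				goto restart;
 *			goto exit_unlock;
 *		}
 */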

/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. ovs_mutex must
 * be held.
 */
void ovs_vport_del(struct vport *vport)
{
	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}

/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	const struct rtnl_link_stats64 *dev_stats;
	struct rtnl_link_stats64 temp;

	dev_stats = dev_get_stats(vport->dev, &temp);
	stats->rx_errors = dev_stats->rx_errors;
	stats->tx_errors = dev_stats->tx_errors;
	stats->tx_dropped = dev_stats->tx_dropped;
	stats->rx_dropped = dev_stats->rx_dropped;

	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_packets;
	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_packets;
}

/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred. If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	if (!vport->ops->get_options)
		return 0;

	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	err = vport->ops->get_options(vport, skb);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);
	return 0;
}

/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
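
/*
 * Illustrative note: @ids above is a flat netlink attribute holding an
 * array of u32 portids, so e.g. nla_len(ids) == 12 parses as
 * n_ids == 3.  The swap itself is the usual RCU publish/retire
 * sequence: rcu_assign_pointer() makes the new array visible to
 * readers such as ovs_vport_find_upcall_portid(), while kfree_rcu()
 * keeps the old array alive until pre-existing readers are done.
 */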

/**
 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified. Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	/* If there is only one portid, select it in the fast-path. */
	if (ids->n_ids == 1)
		return ids->ids[0];

	hash = skb_get_hash(skb);
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}
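
/*
 * Worked example (sketch): the last two lines above compute
 * hash % n_ids without a division, using the reciprocal precomputed in
 * ovs_vport_set_upcall_portids().  With n_ids == 4 and
 * hash == 0x9e3779b9 (2654435769):
 *
 *	reciprocal_divide(hash, rn_ids) == 2654435769 / 4 == 663608942
 *	ids_index == 2654435769 - 4 * 663608942 == 1
 *
 * so the upcall is sent to ids->ids[1].
 */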

/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}

static int packet_length(const struct sk_buff *skb,
			 struct net_device *dev)
{
	int length = skb->len - dev->hard_header_len;

	if (!skb_vlan_tag_present(skb) &&
	    eth_type_vlan(skb->protocol))
		length -= VLAN_HLEN;

	/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
	 * (ETH_HLEN + VLAN_HLEN) in addition to the mtu value, but almost
	 * none account for 802.1ad, e.g. is_skb_forwardable().
	 */

	return length > 0 ? length : 0;
}
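
/*
 * Worked example (sketch): for a 1518-byte frame on an Ethernet device
 * (hard_header_len == 14) whose single VLAN tag is still inline in the
 * packet (eth_type_vlan(skb->protocol) is true, no accelerated tag):
 *
 *	length = 1518 - 14 - VLAN_HLEN = 1518 - 14 - 4 = 1500
 *
 * which compares correctly against a standard 1500-byte MTU in
 * ovs_vport_send() below.
 */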

void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

	switch (vport->dev->type) {
	case ARPHRD_NONE:
		if (mac_proto == MAC_PROTO_ETHERNET) {
			skb_reset_network_header(skb);
			skb_reset_mac_len(skb);
			skb->protocol = htons(ETH_P_TEB);
		} else if (mac_proto != MAC_PROTO_NONE) {
			WARN_ON_ONCE(1);
			goto drop;
		}
		break;
	case ARPHRD_ETHER:
		if (mac_proto != MAC_PROTO_ETHERNET)
			goto drop;
		break;
	default:
		goto drop;
	}

	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb, vport->dev), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	skb->tstamp = 0;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}