xref: /OK3568_Linux_fs/kernel/net/caif/caif_dev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");
/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Flow-off threshold, in percent of tx_queue_len */
struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

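/*
 * Lightweight per-CPU reference counting for caif_device_entry:
 * caifd_hold()/caifd_put() do lock-free per-CPU increments and
 * decrements, while caifd_refcnt_read() sums the counter over all
 * possible CPUs. Callers tearing an entry down re-check or retry
 * (see NETDEV_UNREGISTER and caif_exit_net() below), since the sum
 * can race with readers still inside an RCU section.
 */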
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

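/*
 * skb destructor installed by transmit() when flow is turned off.
 * It runs once the device has consumed the flow-off skb: it detaches
 * the entry's xoff state under flow_lock, calls the skb's original
 * destructor (if any), and then signals FLOW_ON up the CAIF stack.
 */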
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

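/*
 * Transmit a CAIF packet on the underlying net device. If the device
 * runs with a real TX queue, simple flow control is applied: when the
 * qdisc backlog passes q_high percent of tx_queue_len (or the queue is
 * stopped), FLOW_OFF is signalled upwards and the skb's destructor is
 * temporarily hijacked so caif_flow_cb() can signal FLOW_ON once the
 * skb has been consumed.
 */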
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* The explicit qdisc length can only be checked for !NOLOCK
		 * qdiscs; for lockless qdiscs always set flow off.
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function, and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
					caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

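/*
 * receive() is hooked into the networking core for all ETH_P_CAIF
 * frames via dev_add_pack() in caif_device_init() below.
 */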
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

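/*
 * Register a net device as a CAIF physical layer: allocate a tracking
 * entry, link it into the per-namespace device list, and hand the
 * layer to cfcnfg. Called from the NETDEV_REGISTER handling below,
 * and exported for CAIF link-layer drivers that enroll their own
 * devices.
 */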
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
				link_support,
				caifdev->use_fcs,
				head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
							caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating the skb->destructor
		 * here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are still held, simply ignore NETDEV_UNREGISTER and
		 * wait for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF Stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

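/*
 * Per-namespace teardown: unlink every remaining CAIF device and wait
 * (up to 10 retries of 250 ms each) for in-flight packets and
 * references to drain before the entries are freed and the cfcnfg
 * instance is removed.
 */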
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);