// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends one or more cn_msg messages at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message it puts a locally unique sequence
 * number and a random acknowledge number in it.  The sequence number
 * may be copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the original acknowledge number + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting but its acknowledge number is not equal to
 * the acknowledge number in the original message + 1, then it is
 * a new message.
 *
 * If msg->len != len, then additional cn_msg messages are expected to
 * follow the first msg.
 *
 * The message is sent to the given portid, to the given group, to both
 * if both are set, or, if both are zero, the group is looked up from
 * msg->id and the message is sent there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid,
			!gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
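
/*
 * A minimal sketch (hypothetical ids and sizes) of batching two cn_msg
 * into one netlink message with cn_netlink_send_mult(): 'len' counts
 * everything that follows the first cn_msg header, i.e. the first payload
 * plus any further cn_msg headers and payloads.  The multicast group
 * typically matches the callback idx.
 *
 *	u16 data_len = 16;	// hypothetical per-message payload size
 *	size_t total = 2 * (sizeof(struct cn_msg) + data_len);
 *	struct cn_msg *first = kzalloc(total, GFP_KERNEL);
 *	struct cn_msg *second = (void *)((u8 *)(first + 1) + data_len);
 *
 *	// fill first and second: id, seq, ack, len = data_len, plus payloads
 *
 *	cn_netlink_send_mult(first, total - sizeof(struct cn_msg),
 *			     0, first->id.idx, GFP_KERNEL);
 *	kfree(first);
 */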

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
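
/*
 * A sketch (hypothetical names) of a registered callback replying to a
 * request using the seq/ack convention documented at the top of this
 * file: the reply keeps the sequence number and carries ack + 1.
 *
 *	static void example_reply(struct cn_msg *msg,
 *				  struct netlink_skb_parms *nsp)
 *	{
 *		struct cn_msg reply = {
 *			.id  = msg->id,
 *			.seq = msg->seq,	// same sequence number
 *			.ack = msg->ack + 1,	// marks this as the reply
 *		};
 *
 *		// unicast back to the sender's portid, no payload (len == 0)
 *		cn_netlink_send(&reply, nsp->portid, 0, GFP_KERNEL);
 *	}
 */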

/*
 * Callback helper - looks up the callback registered for the message id
 * and invokes it for the given data.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;
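
	/*
	 * For clarity, the layout the check above enforces (one cn_msg per
	 * netlink message, followed by its payload within nlmsg_len):
	 *
	 *   | struct nlmsghdr | struct cn_msg | msg->len bytes of payload |
	 *    <- NLMSG_HDRLEN -> <--------- nlmsg_len(nlh) -------------->
	 */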

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			refcount_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	int len, err;

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE)
			return;

		err = cn_call_callback(skb_get(skb));
		if (err < 0)
			kfree_skb(skb);
	}
}

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, the new one is
 * not added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);
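
/*
 * A minimal registration sketch, assuming a hypothetical idx/val pair and
 * callback; typically done from a module's init path once the connector
 * core is up (cn_add_callback() returns -EAGAIN before that).
 *
 *	static struct cb_id example_id = { .idx = 0x10, .val = 0x1 };
 *
 *	static void example_callback(struct cn_msg *msg,
 *				     struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("cn example: seq %u, ack %u, %u byte(s)\n",
 *			msg->seq, msg->ack, msg->len);
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		return cn_add_callback(&example_id, "cn_example",
 *				       example_callback);
 *	}
 */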

/*
 * Callback remove routine - removes the callback with the given ID.
 * If no callback with the given ID is registered, nothing happens.
 *
 * May sleep while waiting for the reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
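
/*
 * The matching teardown for the registration sketch above, using the same
 * hypothetical example_id; as noted above, this may sleep while waiting
 * for the entry's reference counter to drop.
 *
 *	static void __exit example_exit(void)
 *	{
 *		cn_del_callback(&example_id);
 *	}
 */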

static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}
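
/*
 * On a system with, for example, the proc connector and w1 callbacks
 * registered, the resulting /proc/net/connector output might look like:
 *
 *	Name            ID
 *	cn_proc         1:1
 *	w1              3:1
 */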

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups	= CN_NETLINK_USERS + 0xf,
		.input	= cn_rx_skb,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);

	return 0;
}

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);
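
/*
 * A rough sketch of the userspace side (the group number is hypothetical
 * and normally matches the kernel callback's cb_id.idx):
 *
 *	int s = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *	unsigned int group = 0x10;
 *
 *	bind(s, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &group, sizeof(group));
 *	// each datagram read from s is: nlmsghdr | cn_msg | payload
 */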