// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

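/*
 * One subscriber list per event type. Writers (subscribe/unsubscribe)
 * serialize on subscriber_mutex; event delivery walks these lists under
 * rcu_read_lock() only, which is why removal pairs list_del_rcu() with
 * synchronize_rcu() before freeing an entry.
 */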
static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;

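	/*
	 * Callbacks are invoked from an RCU read-side critical section,
	 * so they must not sleep or block. list_for_each_entry_rcu() is
	 * safe against a concurrent list_del_rcu() from unsubscribe.
	 */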
	rcu_read_lock();
	subscriber_list = &subscriber_array[event_msg->event_data.event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

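	/*
	 * The payload must be large enough to carry at least the u32
	 * event field and no larger than the biggest event payload we
	 * know about (vmci_event_data_max).
	 */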
	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track the subscription.  Used with
 *              vmci_event_unsubscribe().
 *
 * Subscribes to the provided event. The specified callback will be
 * fired from an RCU read-side critical section and therefore must
 * not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new event is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */
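		/*
		 * Note: subscription_id is static, so IDs increase across
		 * calls and can eventually wrap; the event_find() lookup
		 * below rejects any candidate that is still in use.
		 */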

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	*new_subscription_id = sub->id;
	if (retval != VMCI_SUCCESS)
		kfree(sub);	/* never published on failure; don't leak it */
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
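
/*
 * Minimal usage sketch. The callback fires from an RCU read-side
 * critical section and must not sleep; the example_* names here are
 * hypothetical, while vmci_event_subscribe() and VMCI_EVENT_QP_RESUMED
 * come from the VMCI headers included above.
 */
#if 0
static void example_event_cb(u32 sub_id, const struct vmci_event_data *ed,
			     void *client_data)
{
	/* Atomic context: log only; defer heavy work to a workqueue. */
	pr_info("VMCI event %u on subscription %u\n", ed->event, sub_id);
}

static int example_setup(u32 *sub_id)
{
	return vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				    example_event_cb, NULL, sub_id);
}
#endif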

/*
 * vmci_event_unsubscribe() - Unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribes from the given event. Removes the subscription from its
 * list and frees it once any in-flight deliveries have drained.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	synchronize_rcu();
	kfree(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
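
/*
 * Minimal teardown sketch: because vmci_event_unsubscribe() calls
 * synchronize_rcu() before freeing, it returns only after every
 * in-flight callback has completed, so callback resources can be
 * released immediately afterwards. example_teardown is hypothetical.
 */
#if 0
static void example_teardown(u32 sub_id)
{
	/* After this returns, example_event_cb can no longer run. */
	vmci_event_unsubscribe(sub_id);
}
#endif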