// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>

#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

/* Use a wide upper bound for the maximum contexts. */
#define VMCI_MAX_CONTEXTS 2000

/*
 * List of current VMCI contexts. Contexts can be added by
 * vmci_ctx_create() and removed via vmci_ctx_destroy().
 * These, along with context lookup, are protected by the
 * list structure's lock.
 */
static struct {
	struct list_head head;
	spinlock_t lock; /* Spinlock for context list operations */
} ctx_list = {
	.head = LIST_HEAD_INIT(ctx_list.head),
	.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
};

/* Used by contexts that did not set up notify flag pointers */
static bool ctx_dummy_notify;

static void ctx_signal_notify(struct vmci_ctx *context)
{
	*context->notify = true;
}

static void ctx_clear_notify(struct vmci_ctx *context)
{
	*context->notify = false;
}

/*
 * If nothing requires the attention of the guest, clears both
 * the notify flag and the call.
 */
static void ctx_clear_notify_call(struct vmci_ctx *context)
{
	if (context->pending_datagrams == 0 &&
	    vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
		ctx_clear_notify(context);
}

/*
 * Sets the context's notify flag iff datagrams are pending for this
 * context. Called from vmci_setup_notify().
 */
void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
{
	spin_lock(&context->lock);
	if (context->pending_datagrams)
		ctx_signal_notify(context);
	spin_unlock(&context->lock);
}

/*
 * Allocates and initializes a VMCI context.
 */
struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
				 uintptr_t event_hnd,
				 int user_version,
				 const struct cred *cred)
{
	struct vmci_ctx *context;
	int error;

	if (cid == VMCI_INVALID_ID) {
		pr_devel("Invalid context ID for VMCI context\n");
		error = -EINVAL;
		goto err_out;
	}

	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
		pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
			 priv_flags);
		error = -EINVAL;
		goto err_out;
	}

	if (user_version == 0) {
		pr_devel("Invalid user_version %d\n", user_version);
		error = -EINVAL;
		goto err_out;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		pr_warn("Failed to allocate memory for VMCI context\n");
		error = -ENOMEM;
		goto err_out;
	}

	kref_init(&context->kref);
	spin_lock_init(&context->lock);
	INIT_LIST_HEAD(&context->list_item);
	INIT_LIST_HEAD(&context->datagram_queue);
	INIT_LIST_HEAD(&context->notifier_list);

	/* Initialize host-specific VMCI context. */
	init_waitqueue_head(&context->host_context.wait_queue);

	context->queue_pair_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
	if (!context->queue_pair_array) {
		error = -ENOMEM;
		goto err_free_ctx;
	}

	context->doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->doorbell_array) {
		error = -ENOMEM;
		goto err_free_qp_array;
	}

	context->pending_doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->pending_doorbell_array) {
		error = -ENOMEM;
		goto err_free_db_array;
	}

	context->user_version = user_version;

	context->priv_flags = priv_flags;

	if (cred)
		context->cred = get_cred(cred);

	context->notify = &ctx_dummy_notify;
	context->notify_page = NULL;

	/*
	 * If we collide with an existing context we generate a new
	 * one and use it instead. The VMX will determine if regeneration
	 * is okay. Since there aren't 4B - 16 VMs running on a given
	 * host, the loop below will terminate.
	 */
	spin_lock(&ctx_list.lock);

	while (vmci_ctx_exists(cid)) {
		/* We reserve the lowest 16 ids for fixed contexts. */
		cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
		if (cid == VMCI_INVALID_ID)
			cid = VMCI_RESERVED_CID_LIMIT;
	}
	context->cid = cid;

	list_add_tail_rcu(&context->list_item, &ctx_list.head);
	spin_unlock(&ctx_list.lock);

	return context;

 err_free_db_array:
	vmci_handle_arr_destroy(context->doorbell_array);
 err_free_qp_array:
	vmci_handle_arr_destroy(context->queue_pair_array);
 err_free_ctx:
	kfree(context);
 err_out:
	return ERR_PTR(error);
}
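
/*
 * A minimal usage sketch of the create/destroy pairing (hypothetical
 * host-side caller; cid, event_hnd and user_version are placeholder
 * values supplied by the caller, not defined in this file):
 *
 *	struct vmci_ctx *ctx;
 *
 *	ctx = vmci_ctx_create(cid, VMCI_NO_PRIVILEGE_FLAGS, event_hnd,
 *			      user_version, current_cred());
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	vmci_ctx_destroy(ctx);
 */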

/*
 * Destroy VMCI context.
 */
void vmci_ctx_destroy(struct vmci_ctx *context)
{
	spin_lock(&ctx_list.lock);
	list_del_rcu(&context->list_item);
	spin_unlock(&ctx_list.lock);
	synchronize_rcu();

	vmci_ctx_put(context);
}

/*
 * Fire notification for all contexts interested in given cid.
 */
static int ctx_fire_notification(u32 context_id, u32 priv_flags)
{
	u32 i, array_size;
	struct vmci_ctx *sub_ctx;
	struct vmci_handle_arr *subscriber_array;
	struct vmci_handle context_handle =
		vmci_make_handle(context_id, VMCI_EVENT_HANDLER);

	/*
	 * We create an array to hold the subscribers we find when
	 * scanning through all contexts.
	 */
	subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
	if (subscriber_array == NULL)
		return VMCI_ERROR_NO_MEM;

	/*
	 * Scan all contexts to find who is interested in being
	 * notified about given contextID.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
		struct vmci_handle_list *node;

		/*
		 * We only deliver notifications of the removal of
		 * contexts, if the two contexts are allowed to
		 * interact.
		 */
		if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
			continue;

		list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
			if (!vmci_handle_is_equal(node->handle, context_handle))
				continue;

			vmci_handle_arr_append_entry(&subscriber_array,
					vmci_make_handle(sub_ctx->cid,
							 VMCI_EVENT_HANDLER));
		}
	}
	rcu_read_unlock();

	/* Fire event to all subscribers. */
	array_size = vmci_handle_arr_get_size(subscriber_array);
	for (i = 0; i < array_size; i++) {
		int result;
		struct vmci_event_ctx ev;

		ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
		ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
						  VMCI_CONTEXT_RESOURCE_ID);
		ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
		ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
		ev.payload.context_id = context_id;

		result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
						&ev.msg.hdr, false);
		if (result < VMCI_SUCCESS) {
			pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
				 ev.msg.event_data.event,
				 ev.msg.hdr.dst.context);
			/* We continue to enqueue on next subscriber. */
		}
	}
	vmci_handle_arr_destroy(subscriber_array);

	return VMCI_SUCCESS;
}

/*
 * Returns the current number of pending datagrams. The call may
 * also serve as a synchronization point for the datagram queue,
 * as no enqueue operations can occur concurrently.
 */
int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
{
	struct vmci_ctx *context;

	context = vmci_ctx_get(cid);
	if (context == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	spin_lock(&context->lock);
	if (pending)
		*pending = context->pending_datagrams;
	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return VMCI_SUCCESS;
}

/*
 * Queues a VMCI datagram for the appropriate target VM context.
 */
int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
{
	struct vmci_datagram_queue_entry *dq_entry;
	struct vmci_ctx *context;
	struct vmci_handle dg_src;
	size_t vmci_dg_size;

	vmci_dg_size = VMCI_DG_SIZE(dg);
	if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
		pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size);
		return VMCI_ERROR_INVALID_ARGS;
	}

	/* Get the target VM's VMCI context. */
	context = vmci_ctx_get(cid);
	if (!context) {
		pr_devel("Invalid context (ID=0x%x)\n", cid);
		return VMCI_ERROR_INVALID_ARGS;
	}

	/* Allocate guest call entry and add it to the target VM's queue. */
	dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
	if (dq_entry == NULL) {
		pr_warn("Failed to allocate memory for datagram\n");
		vmci_ctx_put(context);
		return VMCI_ERROR_NO_MEM;
	}
	dq_entry->dg = dg;
	dq_entry->dg_size = vmci_dg_size;
	dg_src = dg->src;
	INIT_LIST_HEAD(&dq_entry->list_item);

	spin_lock(&context->lock);

	/*
	 * We put a higher limit on datagrams from the hypervisor. If
	 * the pending datagram is not from hypervisor, then we check
	 * if enqueueing it would exceed the
	 * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
	 * the pending datagram is from hypervisor, we allow it to be
	 * queued at the destination side provided we don't reach the
	 * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
	 */
	if (context->datagram_queue_size + vmci_dg_size >=
		 VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
	    (!vmci_handle_is_equal(dg_src,
				vmci_make_handle
				(VMCI_HYPERVISOR_CONTEXT_ID,
				 VMCI_CONTEXT_RESOURCE_ID)) ||
	     context->datagram_queue_size + vmci_dg_size >=
		 VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
		spin_unlock(&context->lock);
		vmci_ctx_put(context);
		kfree(dq_entry);
		pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
		return VMCI_ERROR_NO_RESOURCES;
	}

	list_add(&dq_entry->list_item, &context->datagram_queue);
	context->pending_datagrams++;
	context->datagram_queue_size += vmci_dg_size;
	ctx_signal_notify(context);
	wake_up(&context->host_context.wait_queue);
	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return vmci_dg_size;
}
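
/*
 * Caller-side sketch (hypothetical, not taken from this file): a
 * successful enqueue returns the datagram size in bytes, so results
 * must be compared against VMCI_SUCCESS rather than tested for
 * equality:
 *
 *	int retval = vmci_ctx_enqueue_datagram(cid, dg);
 *
 *	if (retval < VMCI_SUCCESS)
 *		return retval;
 *
 * On success, retval equals VMCI_DG_SIZE(dg), the number of bytes
 * enqueued.
 */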

/*
 * Verifies whether a context with the specified context ID exists.
 * FIXME: utility is dubious as no decisions can be reliably made
 * using this data, since contexts can appear and disappear at any time.
 */
bool vmci_ctx_exists(u32 cid)
{
	struct vmci_ctx *context;
	bool exists = false;

	rcu_read_lock();

	list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
		if (context->cid == cid) {
			exists = true;
			break;
		}
	}

	rcu_read_unlock();
	return exists;
}

/*
 * Retrieves VMCI context corresponding to the given cid.
 */
struct vmci_ctx *vmci_ctx_get(u32 cid)
{
	struct vmci_ctx *c, *context = NULL;

	if (cid == VMCI_INVALID_ID)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
		if (c->cid == cid) {
			/*
			 * The context owner drops its own reference to the
			 * context only after removing it from the list and
			 * waiting for RCU grace period to expire. This
			 * means that we are not about to increase the
			 * reference count of something that is in the
			 * process of being destroyed.
			 */
			context = c;
			kref_get(&context->kref);
			break;
		}
	}
	rcu_read_unlock();

	return context;
}

/*
 * Deallocates all parts of a context data structure. This
 * function doesn't lock the context, because it assumes that
 * the caller was holding the last reference to context.
 */
static void ctx_free_ctx(struct kref *kref)
{
	struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
	struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
	struct vmci_handle temp_handle;
	struct vmci_handle_list *notifier, *tmp;

	/*
	 * Fire event to all contexts interested in knowing this
	 * context is dying.
	 */
	ctx_fire_notification(context->cid, context->priv_flags);

	/*
	 * Cleanup all queue pair resources attached to context. If
	 * the VM dies without cleaning up, this code will make sure
	 * that no resources are leaked.
	 */
	temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
	while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
		if (vmci_qp_broker_detach(temp_handle,
					  context) < VMCI_SUCCESS) {
			/*
			 * When vmci_qp_broker_detach() succeeds it
			 * removes the handle from the array. If
			 * detach fails, we must remove the handle
			 * ourselves.
			 */
			vmci_handle_arr_remove_entry(context->queue_pair_array,
						     temp_handle);
		}
		temp_handle =
		    vmci_handle_arr_get_entry(context->queue_pair_array, 0);
	}

	/*
	 * It is fine to destroy this without locking the datagram
	 * queue, as this is the only thread having a reference to
	 * the context.
	 */
	list_for_each_entry_safe(dq_entry, dq_entry_tmp,
				 &context->datagram_queue, list_item) {
		WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
		list_del(&dq_entry->list_item);
		kfree(dq_entry->dg);
		kfree(dq_entry);
	}

	list_for_each_entry_safe(notifier, tmp,
				 &context->notifier_list, node) {
		list_del(&notifier->node);
		kfree(notifier);
	}

	vmci_handle_arr_destroy(context->queue_pair_array);
	vmci_handle_arr_destroy(context->doorbell_array);
	vmci_handle_arr_destroy(context->pending_doorbell_array);
	vmci_ctx_unset_notify(context);
	if (context->cred)
		put_cred(context->cred);
	kfree(context);
}

/*
 * Drops reference to VMCI context. If this is the last reference to
 * the context it will be deallocated. A context is created with
 * a reference count of one, and on destroy, it is removed from
 * the context list before its reference count is decremented. Thus,
 * if we reach zero, we are sure that nobody else is about to increment
 * it (they would need the entry in the context list for that), and so
 * there is no need for locking.
 */
void vmci_ctx_put(struct vmci_ctx *context)
{
	kref_put(&context->kref, ctx_free_ctx);
}

/*
 * Dequeues the next datagram and returns it to the caller.
 * The caller passes in a pointer to the max size datagram
 * it can handle, and the datagram is only dequeued if its
 * size does not exceed max_size. If it is larger, max_size
 * is set to the size of the datagram to give the caller a
 * chance to set up a larger buffer for the guestcall.
 */
int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
			      size_t *max_size,
			      struct vmci_datagram **dg)
{
	struct vmci_datagram_queue_entry *dq_entry;
	struct list_head *list_item;
	int rv;

	/* Dequeue the next datagram entry. */
	spin_lock(&context->lock);
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		spin_unlock(&context->lock);
		pr_devel("No datagrams pending\n");
		return VMCI_ERROR_NO_MORE_DATAGRAMS;
	}

	list_item = context->datagram_queue.next;

	dq_entry =
	    list_entry(list_item, struct vmci_datagram_queue_entry, list_item);

	/* Check size of caller's buffer. */
	if (*max_size < dq_entry->dg_size) {
		*max_size = dq_entry->dg_size;
		spin_unlock(&context->lock);
		pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
			 (u32) *max_size);
		return VMCI_ERROR_NO_MEM;
	}

	list_del(list_item);
	context->pending_datagrams--;
	context->datagram_queue_size -= dq_entry->dg_size;
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		rv = VMCI_SUCCESS;
	} else {
		/*
		 * Return the size of the next datagram.
		 */
		struct vmci_datagram_queue_entry *next_entry;

		list_item = context->datagram_queue.next;
		next_entry =
		    list_entry(list_item, struct vmci_datagram_queue_entry,
			       list_item);

		/*
		 * The following size_t -> int truncation is fine as
		 * the maximum size of a (routable) datagram is 68KB.
		 */
		rv = (int)next_entry->dg_size;
	}
	spin_unlock(&context->lock);

	/* Caller must free datagram. */
	*dg = dq_entry->dg;
	dq_entry->dg = NULL;
	kfree(dq_entry);

	return rv;
}
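
/*
 * A minimal caller sketch of the two-call pattern the size check above
 * implies (hypothetical caller; MY_DEFAULT_DG_SIZE is an illustrative
 * constant, and error handling is elided):
 *
 *	size_t size = MY_DEFAULT_DG_SIZE;
 *	struct vmci_datagram *dg;
 *	int result;
 *
 *	result = vmci_ctx_dequeue_datagram(context, &size, &dg);
 *	if (result == VMCI_ERROR_NO_MEM) {
 *		// size now holds the datagram's actual size; grow the
 *		// receive buffer to at least that many bytes and retry.
 *		result = vmci_ctx_dequeue_datagram(context, &size, &dg);
 *	}
 */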

/*
 * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
 * page mapped/locked by vmci_setup_notify().
 */
void vmci_ctx_unset_notify(struct vmci_ctx *context)
{
	struct page *notify_page;

	spin_lock(&context->lock);

	notify_page = context->notify_page;
	context->notify = &ctx_dummy_notify;
	context->notify_page = NULL;

	spin_unlock(&context->lock);

	if (notify_page) {
		kunmap(notify_page);
		put_page(notify_page);
	}
}

/*
 * Add remote_cid to the list of contexts the current context wants
 * notifications from/about.
 */
int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier, *n;
	int result;
	bool exists = false;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
		pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
			 context_id, remote_cid);
		result = VMCI_ERROR_DST_UNREACHABLE;
		goto out;
	}

	if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		result = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
	if (!notifier) {
		result = VMCI_ERROR_NO_MEM;
		goto out;
	}

	INIT_LIST_HEAD(&notifier->node);
	notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);

	if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
		list_for_each_entry(n, &context->notifier_list, node) {
			if (vmci_handle_is_equal(n->handle, notifier->handle)) {
				exists = true;
				break;
			}
		}

		if (exists) {
			kfree(notifier);
			result = VMCI_ERROR_ALREADY_EXISTS;
		} else {
			list_add_tail_rcu(&notifier->node,
					  &context->notifier_list);
			context->n_notifiers++;
			result = VMCI_SUCCESS;
		}
	} else {
		kfree(notifier);
		result = VMCI_ERROR_NO_MEM;
	}

	spin_unlock(&context->lock);

 out:
	vmci_ctx_put(context);
	return result;
}

/*
 * Remove remote_cid from current context's list of contexts it is
 * interested in getting notifications from/about.
 */
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier, *tmp;
	struct vmci_handle handle;
	bool found = false;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);
	list_for_each_entry_safe(notifier, tmp,
				 &context->notifier_list, node) {
		if (vmci_handle_is_equal(notifier->handle, handle)) {
			list_del_rcu(&notifier->node);
			context->n_notifiers--;
			found = true;
			break;
		}
	}
	spin_unlock(&context->lock);

	if (found) {
		synchronize_rcu();
		kfree(notifier);
	}

	vmci_ctx_put(context);

	return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}

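/*
 * Returns the context's notifier list as a checkpoint buffer of
 * subscriber context IDs. Called with the context lock held.
 */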
static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	u32 *notifiers;
	size_t data_size;
	struct vmci_handle_list *entry;
	int i = 0;

	if (context->n_notifiers == 0) {
		*buf_size = 0;
		*pbuf = NULL;
		return VMCI_SUCCESS;
	}

	data_size = context->n_notifiers * sizeof(*notifiers);
	if (*buf_size < data_size) {
		*buf_size = data_size;
		return VMCI_ERROR_MORE_DATA;
	}

	notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
	if (!notifiers)
		return VMCI_ERROR_NO_MEM;

	list_for_each_entry(entry, &context->notifier_list, node)
		notifiers[i++] = entry->handle.context;

	*buf_size = data_size;
	*pbuf = notifiers;
	return VMCI_SUCCESS;
}

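/*
 * Returns the context's registered doorbell handles as a checkpoint
 * buffer. Called with the context lock held.
 */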
static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	struct dbell_cpt_state *dbells;
	u32 i, n_doorbells;

	n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
	if (n_doorbells > 0) {
		size_t data_size = n_doorbells * sizeof(*dbells);

		if (*buf_size < data_size) {
			*buf_size = data_size;
			return VMCI_ERROR_MORE_DATA;
		}

		dbells = kzalloc(data_size, GFP_ATOMIC);
		if (!dbells)
			return VMCI_ERROR_NO_MEM;

		for (i = 0; i < n_doorbells; i++)
			dbells[i].handle = vmci_handle_arr_get_entry(
						context->doorbell_array, i);

		*buf_size = data_size;
		*pbuf = dbells;
	} else {
		*buf_size = 0;
		*pbuf = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Get current context's checkpoint state of given type.
 */
int vmci_ctx_get_chkpt_state(u32 context_id,
			     u32 cpt_type,
			     u32 *buf_size,
			     void **pbuf)
{
	struct vmci_ctx *context;
	int result;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);

	switch (cpt_type) {
	case VMCI_NOTIFICATION_CPT_STATE:
		result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
		break;

	case VMCI_WELLKNOWN_CPT_STATE:
		/*
		 * For compatibility with VMX'en with VM to VM communication,
		 * we always return zero wellknown handles.
		 */

		*buf_size = 0;
		*pbuf = NULL;
		result = VMCI_SUCCESS;
		break;

	case VMCI_DOORBELL_CPT_STATE:
		result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
		break;

	default:
		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
		result = VMCI_ERROR_INVALID_ARGS;
		break;
	}

	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return result;
}

/*
 * Set current context's checkpoint state of given type.
 */
int vmci_ctx_set_chkpt_state(u32 context_id,
			     u32 cpt_type,
			     u32 buf_size,
			     void *cpt_buf)
{
	u32 i;
	u32 current_id;
	int result = VMCI_SUCCESS;
	u32 num_ids = buf_size / sizeof(u32);

	if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
		/*
		 * We would end up here if VMX with VM to VM communication
		 * attempts to restore a checkpoint with wellknown handles.
		 */
		pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
		return VMCI_ERROR_OBSOLETE;
	}

	if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
		return VMCI_ERROR_INVALID_ARGS;
	}

	for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
		current_id = ((u32 *)cpt_buf)[i];
		result = vmci_ctx_add_notification(context_id, current_id);
		if (result != VMCI_SUCCESS)
			break;
	}
	if (result != VMCI_SUCCESS)
		pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
			 cpt_type, result);

	return result;
}

/*
 * Retrieves the specified context's pending notifications in the
 * form of a handle array. The handle arrays returned are the
 * actual data - not a copy - and should not be modified by the
 * caller. They must be released using
 * vmci_ctx_rcv_notifications_release.
 */
int vmci_ctx_rcv_notifications_get(u32 context_id,
				   struct vmci_handle_arr **db_handle_array,
				   struct vmci_handle_arr **qp_handle_array)
{
	struct vmci_ctx *context;
	int result = VMCI_SUCCESS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);

	*db_handle_array = context->pending_doorbell_array;
	context->pending_doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->pending_doorbell_array) {
		context->pending_doorbell_array = *db_handle_array;
		*db_handle_array = NULL;
		result = VMCI_ERROR_NO_MEM;
	}
	*qp_handle_array = NULL;

	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return result;
}

/*
 * Releases handle arrays with pending notifications previously
 * retrieved using vmci_ctx_rcv_notifications_get. If the
 * notifications were not successfully handed over to the guest,
 * success must be false.
 */
void vmci_ctx_rcv_notifications_release(u32 context_id,
					struct vmci_handle_arr *db_handle_array,
					struct vmci_handle_arr *qp_handle_array,
					bool success)
{
	struct vmci_ctx *context = vmci_ctx_get(context_id);

	spin_lock(&context->lock);
	if (!success) {
		struct vmci_handle handle;

		/*
		 * New notifications may have been added while we were not
		 * holding the context lock, so we transfer any new pending
		 * doorbell notifications to the old array, and reinstate the
		 * old array.
		 */
		handle = vmci_handle_arr_remove_tail(
					context->pending_doorbell_array);
		while (!vmci_handle_is_invalid(handle)) {
			if (!vmci_handle_arr_has_entry(db_handle_array,
						       handle)) {
				vmci_handle_arr_append_entry(
						&db_handle_array, handle);
			}
			handle = vmci_handle_arr_remove_tail(
					context->pending_doorbell_array);
		}
		vmci_handle_arr_destroy(context->pending_doorbell_array);
		context->pending_doorbell_array = db_handle_array;
		db_handle_array = NULL;
	} else {
		ctx_clear_notify_call(context);
	}
	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	if (db_handle_array)
		vmci_handle_arr_destroy(db_handle_array);

	if (qp_handle_array)
		vmci_handle_arr_destroy(qp_handle_array);
}

/*
 * Registers that a new doorbell handle has been allocated by the
 * context. Only registered doorbell handles can be notified.
 */
int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
{
	struct vmci_ctx *context;
	int result;

	if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);
	if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
		result = vmci_handle_arr_append_entry(&context->doorbell_array,
						      handle);
	else
		result = VMCI_ERROR_DUPLICATE_ENTRY;

	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return result;
}

/*
 * Unregisters a doorbell handle that was previously registered
 * with vmci_ctx_dbell_create.
 */
int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
{
	struct vmci_ctx *context;
	struct vmci_handle removed_handle;

	if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);
	removed_handle =
	    vmci_handle_arr_remove_entry(context->doorbell_array, handle);
	vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
	spin_unlock(&context->lock);

	vmci_ctx_put(context);

	return vmci_handle_is_invalid(removed_handle) ?
	    VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
}

/*
 * Unregisters all doorbell handles that were previously
 * registered with vmci_ctx_dbell_create.
 */
int vmci_ctx_dbell_destroy_all(u32 context_id)
{
	struct vmci_ctx *context;
	struct vmci_handle handle;

	if (context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);
	do {
		struct vmci_handle_arr *arr = context->doorbell_array;

		handle = vmci_handle_arr_remove_tail(arr);
	} while (!vmci_handle_is_invalid(handle));
	do {
		struct vmci_handle_arr *arr = context->pending_doorbell_array;

		handle = vmci_handle_arr_remove_tail(arr);
	} while (!vmci_handle_is_invalid(handle));
	spin_unlock(&context->lock);

	vmci_ctx_put(context);

	return VMCI_SUCCESS;
}

/*
 * Registers a notification of a doorbell handle initiated by the
 * specified source context. The notification of doorbells is
 * subject to the same isolation rules as datagram delivery. To
 * allow host side senders of notifications a finer granularity
 * of sender rights than those assigned to the sending context
 * itself, the host context is required to specify a different
 * set of privilege flags that will override the privileges of
 * the source context.
 */
int vmci_ctx_notify_dbell(u32 src_cid,
			  struct vmci_handle handle,
			  u32 src_priv_flags)
{
	struct vmci_ctx *dst_context;
	int result;

	if (vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	/* Get the target VM's VMCI context. */
	dst_context = vmci_ctx_get(handle.context);
	if (!dst_context) {
		pr_devel("Invalid context (ID=0x%x)\n", handle.context);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (src_cid != handle.context) {
		u32 dst_priv_flags;

		if (VMCI_CONTEXT_IS_VM(src_cid) &&
		    VMCI_CONTEXT_IS_VM(handle.context)) {
			pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
				 src_cid, handle.context);
			result = VMCI_ERROR_DST_UNREACHABLE;
			goto out;
		}

		result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
		if (result < VMCI_SUCCESS) {
			pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
				handle.context, handle.resource);
			goto out;
		}

		if (src_cid != VMCI_HOST_CONTEXT_ID ||
		    src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
			src_priv_flags = vmci_context_get_priv_flags(src_cid);
		}

		if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto out;
		}
	}

	if (handle.context == VMCI_HOST_CONTEXT_ID) {
		result = vmci_dbell_host_context_notify(src_cid, handle);
	} else {
		spin_lock(&dst_context->lock);

		if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
					       handle)) {
			result = VMCI_ERROR_NOT_FOUND;
		} else {
			if (!vmci_handle_arr_has_entry(
					dst_context->pending_doorbell_array,
					handle)) {
				result = vmci_handle_arr_append_entry(
					&dst_context->pending_doorbell_array,
					handle);
				if (result == VMCI_SUCCESS) {
					ctx_signal_notify(dst_context);
					wake_up(&dst_context->host_context.wait_queue);
				}
			} else {
				result = VMCI_SUCCESS;
			}
		}
		spin_unlock(&dst_context->lock);
	}

 out:
	vmci_ctx_put(dst_context);

	return result;
}

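/*
 * Determines whether the context's user version is recent enough to
 * support host-side queue pairs.
 */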
bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
{
	return context && context->user_version >= VMCI_VERSION_HOSTQP;
}

/*
 * Registers that a new queue pair handle has been allocated by
 * the context.
 */
int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
{
	int result;

	if (context == NULL || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
		result = vmci_handle_arr_append_entry(
			&context->queue_pair_array, handle);
	else
		result = VMCI_ERROR_DUPLICATE_ENTRY;

	return result;
}

/*
 * Unregisters a queue pair handle that was previously registered
 * with vmci_ctx_qp_create.
 */
int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
{
	struct vmci_handle hndl;

	if (context == NULL || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);

	return vmci_handle_is_invalid(hndl) ?
	    VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
}

/*
 * Determines whether a given queue pair handle is registered
 * with the given context.
 */
bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
{
	if (context == NULL || vmci_handle_is_invalid(handle))
		return false;

	return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
}

/*
 * vmci_context_get_priv_flags() - Retrieve privilege flags.
 * @context_id: The context ID of the VMCI context.
 *
 * Retrieves privilege flags of the given VMCI context ID.
 */
u32 vmci_context_get_priv_flags(u32 context_id)
{
	if (vmci_host_code_active()) {
		u32 flags;
		struct vmci_ctx *context;

		context = vmci_ctx_get(context_id);
		if (!context)
			return VMCI_LEAST_PRIVILEGE_FLAGS;

		flags = context->priv_flags;
		vmci_ctx_put(context);
		return flags;
	}
	return VMCI_NO_PRIVILEGE_FLAGS;
}
EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);

/*
 * vmci_is_context_owner() - Determines if user is the context owner
 * @context_id: The context ID of the VMCI context.
 * @uid: The host user id (real kernel value).
 *
 * Determines whether a given UID is the owner of given VMCI context.
 */
bool vmci_is_context_owner(u32 context_id, kuid_t uid)
{
	bool is_owner = false;

	if (vmci_host_code_active()) {
		struct vmci_ctx *context = vmci_ctx_get(context_id);

		if (context) {
			if (context->cred)
				is_owner = uid_eq(context->cred->uid, uid);
			vmci_ctx_put(context);
		}
	}

	return is_owner;
}
EXPORT_SYMBOL_GPL(vmci_is_context_owner);