xref: /OK3568_Linux_fs/kernel/drivers/misc/vmw_vmci/vmci_queue_pair.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * VMware VMCI Driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2012 VMware, Inc. All rights reserved.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #ifndef _VMCI_QUEUE_PAIR_H_
9*4882a593Smuzhiyun #define _VMCI_QUEUE_PAIR_H_
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/vmw_vmci_defs.h>
12*4882a593Smuzhiyun #include <linux/types.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include "vmci_context.h"
15*4882a593Smuzhiyun 
/*
 * Callback needed for correctly waiting on events; invoked with the
 * opaque client_data pointer supplied alongside it (see vmci_qp_alloc).
 */
typedef int (*vmci_event_release_cb) (void *client_data);
18*4882a593Smuzhiyun 
/* Guest device port I/O. */
struct ppn_set {
	u64 num_produce_pages;	/* Number of produce-queue pages. */
	u64 num_consume_pages;	/* Number of consume-queue pages. */
	u64 *produce_ppns;	/* PPN array for the produce queue. */
	u64 *consume_ppns;	/* PPN array for the consume queue. */
	bool initialized;	/* Set once the PPN arrays are populated. */
};
27*4882a593Smuzhiyun 
/*
 * VMCIqueue_pairAllocInfo: parameters for a queue pair allocation
 * request (mirrors the arguments of vmci_qp_broker_alloc below).
 */
struct vmci_qp_alloc_info {
	struct vmci_handle handle;	/* Handle identifying the queue pair. */
	u32 peer;
	u32 flags;
	u64 produce_size;	/* Size of the produce queue. */
	u64 consume_size;	/* Size of the consume queue. */
	u64 ppn_va;	/* Start VA of queue pair PPNs. */
	u64 num_ppns;	/* Number of PPNs starting at ppn_va. */
	s32 result;	/* Out: status of the operation. */
	u32 version;
};
40*4882a593Smuzhiyun 
/*
 * VMCIqueue_pairSetVAInfo: parameters for (re)establishing the VA
 * of an existing queue pair's PPN range.
 */
struct vmci_qp_set_va_info {
	struct vmci_handle handle;	/* Handle identifying the queue pair. */
	u64 va;		/* Start VA of queue pair PPNs. */
	u64 num_ppns;	/* Number of PPNs starting at va. */
	u32 version;
	s32 result;	/* Out: status of the operation. */
};
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /*
 * For backwards compatibility, here is a version of the
 * VMCIqueue_pairPageFileInfo from before support for host end-points
 * was added.
53*4882a593Smuzhiyun  * Note that the current version of that structure requires VMX to
54*4882a593Smuzhiyun  * pass down the VA of the mapped file.  Before host support was added
55*4882a593Smuzhiyun  * there was nothing of the sort.  So, when the driver sees the ioctl
56*4882a593Smuzhiyun  * with a parameter that is the sizeof
57*4882a593Smuzhiyun  * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
58*4882a593Smuzhiyun  * of VMX running can't attach to host end points because it doesn't
59*4882a593Smuzhiyun  * provide the VA of the mapped files.
60*4882a593Smuzhiyun  *
61*4882a593Smuzhiyun  * The Linux driver doesn't get an indication of the size of the
62*4882a593Smuzhiyun  * structure passed down from user space.  So, to fix a long standing
63*4882a593Smuzhiyun  * but unfiled bug, the _pad field has been renamed to version.
64*4882a593Smuzhiyun  * Existing versions of VMX always initialize the PageFileInfo
65*4882a593Smuzhiyun  * structure so that _pad, er, version is set to 0.
66*4882a593Smuzhiyun  *
67*4882a593Smuzhiyun  * A version value of 1 indicates that the size of the structure has
68*4882a593Smuzhiyun  * been increased to include two UVA's: produce_uva and consume_uva.
69*4882a593Smuzhiyun  * These UVA's are of the mmap()'d queue contents backing files.
70*4882a593Smuzhiyun  *
71*4882a593Smuzhiyun  * In addition, if when VMX is sending down the
72*4882a593Smuzhiyun  * VMCIqueue_pairPageFileInfo structure it gets an error then it will
73*4882a593Smuzhiyun  * try again with the _NoHostQP version of the file to see if an older
74*4882a593Smuzhiyun  * VMCI kernel module is running.
75*4882a593Smuzhiyun  */
76*4882a593Smuzhiyun 
/*
 * VMCIqueue_pairPageFileInfo: describes the backing files of a queue
 * pair.  See the versioning discussion in the comment block above.
 */
struct vmci_qp_page_file_info {
	struct vmci_handle handle;	/* Handle identifying the queue pair. */
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;	/* Out: status of the operation. */
	u32 version;	/* Was _pad; 0 for old VMX versions (see above). */
	u64 produce_va;	/* User VA of the mapped file. */
	u64 consume_va;	/* User VA of the mapped file. */
};
89*4882a593Smuzhiyun 
/* vmci queuepair detach info */
struct vmci_qp_dtch_info {
	struct vmci_handle handle;	/* Handle of the queue pair to detach. */
	s32 result;	/* Out: status of the operation. */
	u32 _pad;	/* Unused; structure padding. */
};
96*4882a593Smuzhiyun 
/*
 * struct vmci_qp_page_store describes how the memory of a given queue pair
 * is backed. When the queue pair is between the host and a guest, the
 * page store consists of references to the guest pages. On vmkernel,
 * this is a list of PPNs, and on hosted, it is a user VA where the
 * queue pair is mapped into the VMX address space.
 */
struct vmci_qp_page_store {
	/* Reference to pages backing the queue pair. */
	u64 pages;
	/* Length of pageList/virtual address range (in pages). */
	u32 len;
};
110*4882a593Smuzhiyun 
/*
 * This data type contains the information about a queue.
 * There are two queues (hence, queue pairs) per transaction model between a
 * pair of end points, A & B.  One queue is used by end point A to transmit
 * commands and responses to B.  The other queue is used by B to transmit
 * commands and responses.
 *
 * struct vmci_queue_kern_if is a per-OS defined Queue structure.  It contains
 * either a direct pointer to the linear address of the buffer contents or a
 * pointer to structures which help the OS locate those data pages.  See
 * vmciKernelIf.c for each platform for its definition.
 */
struct vmci_queue {
	struct vmci_queue_header *q_header;	/* Header of this queue. */
	/*
	 * NOTE(review): presumably a saved copy of the header used while
	 * the live mapping is unavailable — confirm in vmci_queue_pair.c.
	 */
	struct vmci_queue_header *saved_header;
	struct vmci_queue_kern_if *kernel_if;	/* Per-OS state (see above). */
};
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun  * Utility function that checks whether the fields of the page
131*4882a593Smuzhiyun  * store contain valid values.
132*4882a593Smuzhiyun  * Result:
133*4882a593Smuzhiyun  * true if the page store is wellformed. false otherwise.
134*4882a593Smuzhiyun  */
135*4882a593Smuzhiyun static inline bool
VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store * page_store)136*4882a593Smuzhiyun VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun 	return page_store->len >= 2;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun 
/* Tear down host-side (broker) queue pair state at module exit. */
void vmci_qp_broker_exit(void);
/* Allocate a broker queue pair on behalf of @context. */
int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
			 u32 flags, u32 priv_flags,
			 u64 produce_size, u64 consume_size,
			 struct vmci_qp_page_store *page_store,
			 struct vmci_ctx *context);
/* Supply the user VAs backing an existing queue pair (see comments above). */
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
				  u64 produce_uva, u64 consume_uva,
				  struct vmci_ctx *context);
/* Detach @context from the queue pair identified by @handle. */
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);

/* Tear down guest-side queue pair endpoints at module exit. */
void vmci_qp_guest_endpoints_exit(void);

/*
 * Allocate a queue pair; on success *produce_q and *consume_q refer to
 * the two queues.  wakeup_cb/client_data are used for waiting on events
 * (see vmci_event_release_cb above).
 */
int vmci_qp_alloc(struct vmci_handle *handle,
		  struct vmci_queue **produce_q, u64 produce_size,
		  struct vmci_queue **consume_q, u64 consume_size,
		  u32 peer, u32 flags, u32 priv_flags,
		  bool guest_endpoint, vmci_event_release_cb wakeup_cb,
		  void *client_data);
/* Map guest memory (@guest_mem) backing the queue pair for @context. */
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context, u64 guest_mem);
/* Unmap the queue pair's backing memory for @context. */
int vmci_qp_broker_unmap(struct vmci_handle handle,
			 struct vmci_ctx *context, u32 gid);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun #endif /* _VMCI_QUEUE_PAIR_H_ */
166