/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

#ifndef _NE_PCI_DEV_H_
#define _NE_PCI_DEV_H_

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/wait.h>

/**
 * DOC: Nitro Enclaves (NE) PCI device
 */

/**
 * PCI_DEVICE_ID_NE - Nitro Enclaves PCI device id.
 */
#define PCI_DEVICE_ID_NE	(0xe4c1)
/**
 * PCI_BAR_NE - Nitro Enclaves PCI device MMIO BAR.
 */
#define PCI_BAR_NE		(0x03)

/**
 * DOC: Device registers in the NE PCI device MMIO BAR
 */

/**
 * NE_ENABLE - (1 byte) Register to notify the device that the driver is using
 *	       it (Read/Write).
 */
#define NE_ENABLE		(0x0000)
#define NE_ENABLE_OFF		(0x00)
#define NE_ENABLE_ON		(0x01)
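
/*
 * Usage sketch (illustrative, not a verbatim copy of the driver logic): the
 * driver claims the device by writing NE_ENABLE_ON to the NE_ENABLE register
 * and reading it back until the device acknowledges, with iomem_base being
 * the ioremap()ed PCI_BAR_NE region and DELAY_MS a hypothetical retry delay:
 *
 *	iowrite8(NE_ENABLE_ON, iomem_base + NE_ENABLE);
 *	while (ioread8(iomem_base + NE_ENABLE) != NE_ENABLE_ON)
 *		msleep(DELAY_MS);
 */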

/**
 * NE_VERSION - (2 bytes) Register to select the device run-time version
 *		(Read/Write).
 */
#define NE_VERSION		(0x0002)
#define NE_VERSION_MAX		(0x0001)
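
/*
 * Illustrative handshake sketch (an assumption based on the register
 * description above, not necessarily the exact driver flow): the driver
 * proposes the highest run-time version it supports and checks that the
 * device accepted it:
 *
 *	iowrite16(NE_VERSION_MAX, iomem_base + NE_VERSION);
 *	if (ioread16(iomem_base + NE_VERSION) != NE_VERSION_MAX)
 *		return -EIO;
 */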

/**
 * NE_COMMAND - (4 bytes) Register to notify the device what command was
 *		requested (Write-Only).
 */
#define NE_COMMAND		(0x0004)

/**
 * NE_EVTCNT - (4 bytes) Register to notify the driver that a reply or a device
 *	       event is available (Read-Only):
 *	       - Lower half  - command reply counter
 *	       - Higher half - out-of-band device event counter
 */
#define NE_EVTCNT		(0x000c)
#define NE_EVTCNT_REPLY_SHIFT	(0)
#define NE_EVTCNT_REPLY_MASK	(0x0000ffff)
#define NE_EVTCNT_REPLY(cnt)	(((cnt) & NE_EVTCNT_REPLY_MASK) >> \
				NE_EVTCNT_REPLY_SHIFT)
#define NE_EVTCNT_EVENT_SHIFT	(16)
#define NE_EVTCNT_EVENT_MASK	(0xffff0000)
#define NE_EVTCNT_EVENT(cnt)	(((cnt) & NE_EVTCNT_EVENT_MASK) >> \
				NE_EVTCNT_EVENT_SHIFT)
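
/*
 * For example, an interrupt handler can read the register once and split the
 * two 16-bit counters with the helpers above (minimal sketch):
 *
 *	u32 evtcnt = ioread32(iomem_base + NE_EVTCNT);
 *	u16 reply_cnt = NE_EVTCNT_REPLY(evtcnt);
 *	u16 event_cnt = NE_EVTCNT_EVENT(evtcnt);
 */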

/**
 * NE_SEND_DATA - (240 bytes) Buffer for sending the command request payload
 *		  (Read/Write).
 */
#define NE_SEND_DATA		(0x0010)

/**
 * NE_RECV_DATA - (240 bytes) Buffer for receiving the command reply payload
 *		  (Read-Only).
 */
#define NE_RECV_DATA		(0x0100)

/**
 * DOC: Device MMIO buffer sizes
 */

/**
 * NE_SEND_DATA_SIZE - Size of the send buffer, in bytes.
 */
#define NE_SEND_DATA_SIZE	(240)

/**
 * NE_RECV_DATA_SIZE - Size of the receive buffer, in bytes.
 */
#define NE_RECV_DATA_SIZE	(240)
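
/*
 * A command round trip copies at most NE_SEND_DATA_SIZE request bytes into
 * the device and at most NE_RECV_DATA_SIZE reply bytes out of it. Minimal
 * sketch, omitting the wait for the reply notification:
 *
 *	memcpy_toio(iomem_base + NE_SEND_DATA, cmd_request, cmd_request_size);
 *	iowrite32(cmd_type, iomem_base + NE_COMMAND);
 *	... wait for the NE_VEC_REPLY notification ...
 *	memcpy_fromio(cmd_reply, iomem_base + NE_RECV_DATA, cmd_reply_size);
 */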

/**
 * DOC: MSI-X interrupt vectors
 */

/**
 * NE_VEC_REPLY - MSI-X vector used for command reply notification.
 */
#define NE_VEC_REPLY		(0)

/**
 * NE_VEC_EVENT - MSI-X vector used for out-of-band events e.g. enclave crash.
 */
#define NE_VEC_EVENT		(1)

/**
 * enum ne_pci_dev_cmd_type - Device command types.
 * @INVALID_CMD:		Invalid command.
 * @ENCLAVE_START:		Start an enclave, after setting its resources.
 * @ENCLAVE_GET_SLOT:		Get the slot uid of an enclave.
 * @ENCLAVE_STOP:		Terminate an enclave.
 * @SLOT_ALLOC:			Allocate a slot for an enclave.
 * @SLOT_FREE:			Free the slot allocated for an enclave.
 * @SLOT_ADD_MEM:		Add a memory region to an enclave slot.
 * @SLOT_ADD_VCPU:		Add a vCPU to an enclave slot.
 * @SLOT_COUNT:			Get the number of allocated slots.
 * @NEXT_SLOT:			Get the next slot in the list of allocated slots.
 * @SLOT_INFO:			Get the info for a slot e.g. slot uid, vCPUs count.
 * @SLOT_ADD_BULK_VCPUS:	Add a number of vCPUs, not providing CPU ids.
 * @MAX_CMD:			A gatekeeper for max possible command type.
 */
enum ne_pci_dev_cmd_type {
	INVALID_CMD		= 0,
	ENCLAVE_START		= 1,
	ENCLAVE_GET_SLOT	= 2,
	ENCLAVE_STOP		= 3,
	SLOT_ALLOC		= 4,
	SLOT_FREE		= 5,
	SLOT_ADD_MEM		= 6,
	SLOT_ADD_VCPU		= 7,
	SLOT_COUNT		= 8,
	NEXT_SLOT		= 9,
	SLOT_INFO		= 10,
	SLOT_ADD_BULK_VCPUS	= 11,
	MAX_CMD,
};

/**
 * DOC: Device commands - payload structure for requests and replies.
 */

/**
 * struct enclave_start_req - ENCLAVE_START request.
 * @slot_uid:		Slot unique id mapped to the enclave to start.
 * @enclave_cid:	Context ID (CID) for the enclave vsock device.
 *			If 0, CID is autogenerated.
 * @flags:		Flags for the enclave to start with (e.g. debug mode).
 */
struct enclave_start_req {
	u64	slot_uid;
	u64	enclave_cid;
	u64	flags;
};

/**
 * struct enclave_get_slot_req - ENCLAVE_GET_SLOT request.
 * @enclave_cid:	Context ID (CID) for the enclave vsock device.
 */
struct enclave_get_slot_req {
	u64	enclave_cid;
};

/**
 * struct enclave_stop_req - ENCLAVE_STOP request.
 * @slot_uid:	Slot unique id mapped to the enclave to stop.
 */
struct enclave_stop_req {
	u64	slot_uid;
};

/**
 * struct slot_alloc_req - SLOT_ALLOC request.
 * @unused:	In order to avoid weird sizeof edge cases.
 */
struct slot_alloc_req {
	u8	unused;
};

/**
 * struct slot_free_req - SLOT_FREE request.
 * @slot_uid:	Slot unique id mapped to the slot to free.
 */
struct slot_free_req {
	u64	slot_uid;
};

/* TODO: Add flags field to the request to add memory region. */
/**
 * struct slot_add_mem_req - SLOT_ADD_MEM request.
 * @slot_uid:	Slot unique id mapped to the slot to add the memory region to.
 * @paddr:	Physical address of the memory region to add to the slot.
 * @size:	Memory size, in bytes, of the memory region to add to the slot.
 */
struct slot_add_mem_req {
	u64	slot_uid;
	u64	paddr;
	u64	size;
};

/**
 * struct slot_add_vcpu_req - SLOT_ADD_VCPU request.
 * @slot_uid:	Slot unique id mapped to the slot to add the vCPU to.
 * @vcpu_id:	vCPU ID of the CPU to add to the enclave.
 * @padding:	Padding for the overall data structure.
 */
struct slot_add_vcpu_req {
	u64	slot_uid;
	u32	vcpu_id;
	u8	padding[4];
};

/**
 * struct slot_count_req - SLOT_COUNT request.
 * @unused:	In order to avoid weird sizeof edge cases.
 */
struct slot_count_req {
	u8	unused;
};

/**
 * struct next_slot_req - NEXT_SLOT request.
 * @slot_uid:	Slot unique id of the next slot in the iteration.
 */
struct next_slot_req {
	u64	slot_uid;
};

/**
 * struct slot_info_req - SLOT_INFO request.
 * @slot_uid:	Slot unique id mapped to the slot to get information about.
 */
struct slot_info_req {
	u64	slot_uid;
};

/**
 * struct slot_add_bulk_vcpus_req - SLOT_ADD_BULK_VCPUS request.
 * @slot_uid:	Slot unique id mapped to the slot to add vCPUs to.
 * @nr_vcpus:	Number of vCPUs to add to the slot.
 */
struct slot_add_bulk_vcpus_req {
	u64	slot_uid;
	u64	nr_vcpus;
};

/**
 * struct ne_pci_dev_cmd_reply - NE PCI device command reply.
 * @rc:			Return code of the logic that processed the request.
 * @padding0:		Padding for the overall data structure.
 * @slot_uid:		Valid for all commands except SLOT_COUNT.
 * @enclave_cid:	Valid for ENCLAVE_START command.
 * @slot_count:		Valid for SLOT_COUNT command.
 * @mem_regions:	Valid for SLOT_ALLOC and SLOT_INFO commands.
 * @mem_size:		Valid for SLOT_INFO command.
 * @nr_vcpus:		Valid for SLOT_INFO command.
 * @flags:		Valid for SLOT_INFO command.
 * @state:		Valid for SLOT_INFO command.
 * @padding1:		Padding for the overall data structure.
 */
struct ne_pci_dev_cmd_reply {
	s32	rc;
	u8	padding0[4];
	u64	slot_uid;
	u64	enclave_cid;
	u64	slot_count;
	u64	mem_regions;
	u64	mem_size;
	u64	nr_vcpus;
	u64	flags;
	u16	state;
	u8	padding1[6];
};

/**
 * struct ne_pci_dev - Nitro Enclaves (NE) PCI device.
 * @cmd_reply_avail:		Variable set if a reply has been sent by the
 *				PCI device.
 * @cmd_reply_wait_q:		Wait queue for handling command reply from the
 *				PCI device.
 * @enclaves_list:		List of the enclaves managed by the PCI device.
 * @enclaves_list_mutex:	Mutex for accessing the list of enclaves.
 * @event_wq:			Work queue for handling out-of-band events
 *				triggered by the Nitro Hypervisor which require
 *				enclave state scanning and propagation to the
 *				enclave process.
 * @iomem_base:			MMIO region of the PCI device.
 * @notify_work:		Work item for every received out-of-band event.
 * @pci_dev_mutex:		Mutex for accessing the PCI device MMIO space.
 * @pdev:			PCI device data structure.
 */
struct ne_pci_dev {
	atomic_t		cmd_reply_avail;
	wait_queue_head_t	cmd_reply_wait_q;
	struct list_head	enclaves_list;
	struct mutex		enclaves_list_mutex;
	struct workqueue_struct	*event_wq;
	void __iomem		*iomem_base;
	struct work_struct	notify_work;
	struct mutex		pci_dev_mutex;
	struct pci_dev		*pdev;
};

/**
 * ne_do_request() - Submit command request to the PCI device based on the command
 *		     type and retrieve the associated reply.
 * @pdev:		PCI device to send the command to and receive the reply from.
 * @cmd_type:		Command type of the request sent to the PCI device.
 * @cmd_request:	Command request payload.
 * @cmd_request_size:	Size of the command request payload.
 * @cmd_reply:		Command reply payload.
 * @cmd_reply_size:	Size of the command reply payload.
 *
 * Context: Process context. This function uses the ne_pci_dev mutex to handle
 *	    one command at a time.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
		  void *cmd_request, size_t cmd_request_size,
		  struct ne_pci_dev_cmd_reply *cmd_reply,
		  size_t cmd_reply_size);
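
/*
 * Example call (illustrative; pdev is assumed to be a PCI device already
 * bound to ne_pci_driver): allocate an enclave slot and read its uid from
 * the reply:
 *
 *	struct slot_alloc_req slot_alloc_req = {};
 *	struct ne_pci_dev_cmd_reply cmd_reply = {};
 *	int rc;
 *
 *	rc = ne_do_request(pdev, SLOT_ALLOC, &slot_alloc_req,
 *			   sizeof(slot_alloc_req), &cmd_reply,
 *			   sizeof(cmd_reply));
 *	if (rc < 0)
 *		return rc;
 *	enclave_slot_uid = cmd_reply.slot_uid;
 */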

/* Nitro Enclaves (NE) PCI device driver */
extern struct pci_driver ne_pci_driver;

#endif /* _NE_PCI_DEV_H_ */