// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

/**
 * DOC: Sample flow of using the ioctl interface provided by the Nitro Enclaves (NE)
 * kernel driver.
 *
 * Usage
 * -----
 *
 * Load the nitro_enclaves module, also setting the enclave CPU pool. The
 * enclave CPUs need to be full cores from the same NUMA node. CPU 0 and its
 * siblings have to remain available for the primary / parent VM, so they
 * cannot be included in the enclave CPU pool.
 *
 * See the CPU list section of the kernel documentation.
 * https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists
 *
 * insmod drivers/virt/nitro_enclaves/nitro_enclaves.ko
 * lsmod
 *
 * The CPU pool can be set at runtime, after the kernel module is loaded.
 *
 * echo <cpu-list> > /sys/module/nitro_enclaves/parameters/ne_cpus
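 *
 * For example, assuming CPUs 2 and 3 are sibling hyperthreads of the same
 * full core on one NUMA node and can be given to the enclave:
 *
 * echo 2,3 > /sys/module/nitro_enclaves/parameters/ne_cpus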
 *
 * NUMA and CPU siblings information can be found using:
 *
 * lscpu
 * /proc/cpuinfo
 *
 * Check the online / offline CPU list. The CPUs from the pool should be
 * offlined.
 *
 * lscpu
 *
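 * The offline CPU list can also be checked via the sysfs interface, for
 * example:
 *
 * cat /sys/devices/system/cpu/offline
 *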
 * Check dmesg for any warnings / errors throughout the NE driver lifetime /
 * usage. The NE logs contain the "nitro_enclaves" or "pci 0000:00:02.0" pattern.
 *
 * dmesg
 *
 * Set up hugetlbfs huge pages. The memory needs to be from the same NUMA node
 * as the enclave CPUs.
 *
 * https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html
 *
 * By default, the allocation of hugetlb pages is distributed across all
 * possible NUMA nodes. Use the following configuration files to set the
 * number of huge pages from a given NUMA node:
 *
 * /sys/devices/system/node/node<X>/hugepages/hugepages-2048kB/nr_hugepages
 * /sys/devices/system/node/node<X>/hugepages/hugepages-1048576kB/nr_hugepages
 *
 * or, on a system without multiple NUMA nodes, the number of 2 MiB / 1 GiB
 * huge pages can also be set using
 *
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 *
 * In this example, 256 huge pages of 2 MiB are used.
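 *
 * For example, on a single NUMA node system, the 2 MiB huge pages can be
 * reserved with:
 *
 * echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages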
 *
 * Build and run the NE sample.
 *
 * make -C samples/nitro_enclaves clean
 * make -C samples/nitro_enclaves
 * ./samples/nitro_enclaves/ne_ioctl_sample <path_to_enclave_image>
 *
 * Unload the nitro_enclaves module.
 *
 * rmmod nitro_enclaves
 * lsmod
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/mman.h>
#include <linux/nitro_enclaves.h>
#include <linux/vm_sockets.h>

/**
 * NE_DEV_NAME - Nitro Enclaves (NE) misc device that provides the ioctl interface.
 */
#define NE_DEV_NAME "/dev/nitro_enclaves"

/**
 * NE_POLL_WAIT_TIME - Timeout in seconds for each poll event.
 */
#define NE_POLL_WAIT_TIME (60)
/**
 * NE_POLL_WAIT_TIME_MS - Timeout in milliseconds for each poll event.
 */
#define NE_POLL_WAIT_TIME_MS (NE_POLL_WAIT_TIME * 1000)

/**
 * NE_SLEEP_TIME - Amount of time in seconds for the process to keep the enclave alive.
 */
#define NE_SLEEP_TIME (300)

/**
 * NE_DEFAULT_NR_VCPUS - Default number of vCPUs set for an enclave.
 */
#define NE_DEFAULT_NR_VCPUS (2)

/**
 * NE_MIN_MEM_REGION_SIZE - Minimum size of a memory region - 2 MiB.
 */
#define NE_MIN_MEM_REGION_SIZE (2 * 1024 * 1024)

/**
 * NE_DEFAULT_NR_MEM_REGIONS - Default number of memory regions of 2 MiB set for
 *			       an enclave.
 */
#define NE_DEFAULT_NR_MEM_REGIONS (256)

/**
 * NE_IMAGE_LOAD_HEARTBEAT_CID - Vsock CID for enclave image loading heartbeat logic.
 */
#define NE_IMAGE_LOAD_HEARTBEAT_CID (3)
/**
 * NE_IMAGE_LOAD_HEARTBEAT_PORT - Vsock port for enclave image loading heartbeat logic.
 */
#define NE_IMAGE_LOAD_HEARTBEAT_PORT (9000)
/**
 * NE_IMAGE_LOAD_HEARTBEAT_VALUE - Heartbeat value for enclave image loading.
 */
#define NE_IMAGE_LOAD_HEARTBEAT_VALUE (0xb7)

/**
 * struct ne_user_mem_region - User space memory region set for an enclave.
 * @userspace_addr: Address of the user space memory region.
 * @memory_size: Size of the user space memory region.
 */
struct ne_user_mem_region {
	void *userspace_addr;
	size_t memory_size;
};

/**
 * ne_create_vm() - Create a slot for the enclave VM.
 * @ne_dev_fd: The file descriptor of the NE misc device.
 * @slot_uid: The generated slot uid for the enclave.
 * @enclave_fd: The generated file descriptor for the enclave.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_create_vm(int ne_dev_fd, unsigned long *slot_uid, int *enclave_fd)
{
	int rc = -EINVAL;
	*enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, slot_uid);

	if (*enclave_fd < 0) {
		rc = *enclave_fd;
		switch (errno) {
		case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
			printf("Error in create VM, no CPUs available in the NE CPU pool\n");

			break;
		}

		default:
			printf("Error in create VM [%m]\n");
		}

		return rc;
	}

	return 0;
}

/**
 * ne_poll_enclave_fd() - Thread function for polling the enclave fd.
 * @data: Argument provided for the polling function.
 *
 * Context: Process context.
 * Return:
 * * NULL on success / failure.
 */
void *ne_poll_enclave_fd(void *data)
{
	int enclave_fd = *(int *)data;
	struct pollfd fds[1] = {};
	int i = 0;
	int rc = -EINVAL;

	printf("Running from poll thread, enclave fd %d\n", enclave_fd);

	fds[0].fd = enclave_fd;
	fds[0].events = POLLIN | POLLERR | POLLHUP;

	/* Keep on polling until the current process is terminated. */
	while (1) {
		printf("[iter %d] Polling ...\n", i);

		rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
		if (rc < 0) {
			printf("Error in poll [%m]\n");

			return NULL;
		}

		i++;

		if (!rc) {
			printf("Poll: %d seconds elapsed\n",
			       i * NE_POLL_WAIT_TIME);

			continue;
		}

		printf("Poll received value 0x%x\n", fds[0].revents);

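		/*
		 * POLLHUP is set on the enclave fd by the NE driver when the
		 * enclave is no longer running e.g. it exited or crashed.
		 */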
		if (fds[0].revents & POLLHUP) {
			printf("Received POLLHUP\n");

			return NULL;
		}

		if (fds[0].revents & POLLNVAL) {
			printf("Received POLLNVAL\n");

			return NULL;
		}
	}

	return NULL;
}

/**
 * ne_alloc_user_mem_region() - Allocate a user space memory region for an enclave.
 * @ne_user_mem_region: User space memory region allocated using hugetlbfs.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_alloc_user_mem_region(struct ne_user_mem_region *ne_user_mem_region)
{
	/**
	 * Check available hugetlb encodings for different huge page sizes in
	 * include/uapi/linux/mman.h.
	 */
	ne_user_mem_region->userspace_addr = mmap(NULL, ne_user_mem_region->memory_size,
						  PROT_READ | PROT_WRITE,
						  MAP_PRIVATE | MAP_ANONYMOUS |
						  MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
	if (ne_user_mem_region->userspace_addr == MAP_FAILED) {
		printf("Error in mmap memory [%m]\n");

		return -1;
	}

	return 0;
}

/**
 * ne_load_enclave_image() - Place the enclave image in the enclave memory.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @ne_user_mem_regions: User space memory regions allocated for the enclave.
 * @enclave_image_path: The file path of the enclave image.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_load_enclave_image(int enclave_fd, struct ne_user_mem_region ne_user_mem_regions[],
				 char *enclave_image_path)
{
	unsigned char *enclave_image = NULL;
	int enclave_image_fd = -1;
	size_t enclave_image_size = 0;
	size_t enclave_memory_size = 0;
	unsigned long i = 0;
	size_t image_written_bytes = 0;
	struct ne_image_load_info image_load_info = {
		.flags = NE_EIF_IMAGE,
	};
	struct stat image_stat_buf = {};
	int rc = -EINVAL;
	size_t temp_image_offset = 0;

	for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
		enclave_memory_size += ne_user_mem_regions[i].memory_size;

	rc = stat(enclave_image_path, &image_stat_buf);
	if (rc < 0) {
		printf("Error in get image stat info [%m]\n");

		return rc;
	}

	enclave_image_size = image_stat_buf.st_size;

	if (enclave_memory_size < enclave_image_size) {
		printf("The enclave memory is smaller than the enclave image size\n");

		return -ENOMEM;
	}

	rc = ioctl(enclave_fd, NE_GET_IMAGE_LOAD_INFO, &image_load_info);
	if (rc < 0) {
		switch (errno) {
		case NE_ERR_NOT_IN_INIT_STATE: {
			printf("Error in get image load info, enclave not in init state\n");

			break;
		}

		case NE_ERR_INVALID_FLAG_VALUE: {
			printf("Error in get image load info, provided invalid flag\n");

			break;
		}

		default:
			printf("Error in get image load info [%m]\n");
		}

		return rc;
	}

	printf("Enclave image offset in enclave memory is %lld\n",
	       image_load_info.memory_offset);

	enclave_image_fd = open(enclave_image_path, O_RDONLY);
	if (enclave_image_fd < 0) {
		printf("Error in open enclave image file [%m]\n");

		return enclave_image_fd;
	}

	enclave_image = mmap(NULL, enclave_image_size, PROT_READ,
			     MAP_PRIVATE, enclave_image_fd, 0);
	if (enclave_image == MAP_FAILED) {
		printf("Error in mmap enclave image [%m]\n");

		return -1;
	}

	temp_image_offset = image_load_info.memory_offset;

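	/*
	 * Copy the enclave image into the enclave memory, starting at the
	 * offset given by the image load info and spanning across consecutive
	 * user space memory regions as needed.
	 */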
	for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
		size_t bytes_to_write = 0;
		size_t memory_offset = 0;
		size_t memory_size = ne_user_mem_regions[i].memory_size;
		size_t remaining_bytes = 0;
		void *userspace_addr = ne_user_mem_regions[i].userspace_addr;

		if (temp_image_offset >= memory_size) {
			temp_image_offset -= memory_size;

			continue;
		} else if (temp_image_offset != 0) {
			memory_offset = temp_image_offset;
			memory_size -= temp_image_offset;
			temp_image_offset = 0;
		}

		remaining_bytes = enclave_image_size - image_written_bytes;
		bytes_to_write = memory_size < remaining_bytes ?
				 memory_size : remaining_bytes;

		memcpy(userspace_addr + memory_offset,
		       enclave_image + image_written_bytes, bytes_to_write);

		image_written_bytes += bytes_to_write;

		if (image_written_bytes == enclave_image_size)
			break;
	}

	munmap(enclave_image, enclave_image_size);

	close(enclave_image_fd);

	return 0;
}

/**
 * ne_set_user_mem_region() - Set a user space memory region for the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @ne_user_mem_region: User space memory region to be set for the enclave.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_user_mem_region(int enclave_fd, struct ne_user_mem_region ne_user_mem_region)
{
	struct ne_user_memory_region mem_region = {
		.flags = NE_DEFAULT_MEMORY_REGION,
		.memory_size = ne_user_mem_region.memory_size,
		.userspace_addr = (__u64)ne_user_mem_region.userspace_addr,
	};
	int rc = -EINVAL;

	rc = ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);
	if (rc < 0) {
		switch (errno) {
		case NE_ERR_NOT_IN_INIT_STATE: {
			printf("Error in set user memory region, enclave not in init state\n");

			break;
		}

		case NE_ERR_INVALID_MEM_REGION_SIZE: {
			printf("Error in set user memory region, mem size not multiple of 2 MiB\n");

			break;
		}

		case NE_ERR_INVALID_MEM_REGION_ADDR: {
			printf("Error in set user memory region, invalid user space address\n");

			break;
		}

		case NE_ERR_UNALIGNED_MEM_REGION_ADDR: {
			printf("Error in set user memory region, unaligned user space address\n");

			break;
		}

		case NE_ERR_MEM_REGION_ALREADY_USED: {
			printf("Error in set user memory region, memory region already used\n");

			break;
		}

		case NE_ERR_MEM_NOT_HUGE_PAGE: {
			printf("Error in set user memory region, not backed by huge pages\n");

			break;
		}

		case NE_ERR_MEM_DIFFERENT_NUMA_NODE: {
			printf("Error in set user memory region, different NUMA node than CPUs\n");

			break;
		}

		case NE_ERR_MEM_MAX_REGIONS: {
			printf("Error in set user memory region, max memory regions reached\n");

			break;
		}

		case NE_ERR_INVALID_PAGE_SIZE: {
			printf("Error in set user memory region, has page not multiple of 2 MiB\n");

			break;
		}

		case NE_ERR_INVALID_FLAG_VALUE: {
			printf("Error in set user memory region, provided invalid flag\n");

			break;
		}

		default:
			printf("Error in set user memory region [%m]\n");
		}

		return rc;
	}

	return 0;
}

/**
 * ne_free_mem_regions() - Unmap all the user space memory regions that were set
 *			   aside for the enclave.
 * @ne_user_mem_regions: The user space memory regions associated with an enclave.
 *
 * Context: Process context.
 */
static void ne_free_mem_regions(struct ne_user_mem_region ne_user_mem_regions[])
{
	unsigned int i = 0;

	for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
		munmap(ne_user_mem_regions[i].userspace_addr,
		       ne_user_mem_regions[i].memory_size);
}

/**
 * ne_add_vcpu() - Add a vCPU to the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @vcpu_id: vCPU id to be set for the enclave, either provided or
 *	     auto-generated (if provided vCPU id is 0).
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_add_vcpu(int enclave_fd, unsigned int *vcpu_id)
{
	int rc = -EINVAL;

	rc = ioctl(enclave_fd, NE_ADD_VCPU, vcpu_id);
	if (rc < 0) {
		switch (errno) {
		case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
			printf("Error in add vcpu, no CPUs available in the NE CPU pool\n");

			break;
		}

		case NE_ERR_VCPU_ALREADY_USED: {
			printf("Error in add vcpu, the provided vCPU is already used\n");

			break;
		}

		case NE_ERR_VCPU_NOT_IN_CPU_POOL: {
			printf("Error in add vcpu, the provided vCPU is not in the NE CPU pool\n");

			break;
		}

		case NE_ERR_VCPU_INVALID_CPU_CORE: {
			printf("Error in add vcpu, the core id of the provided vCPU is invalid\n");

			break;
		}

		case NE_ERR_NOT_IN_INIT_STATE: {
			printf("Error in add vcpu, enclave not in init state\n");

			break;
		}

		case NE_ERR_INVALID_VCPU: {
			printf("Error in add vcpu, the provided vCPU is out of avail CPUs range\n");

			break;
		}

		default:
			printf("Error in add vcpu [%m]\n");
		}

		return rc;
	}

	return 0;
}

/**
 * ne_start_enclave() - Start the given enclave.
 * @enclave_fd: The file descriptor associated with the enclave.
 * @enclave_start_info: Enclave metadata used for starting e.g. vsock CID.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_start_enclave(int enclave_fd, struct ne_enclave_start_info *enclave_start_info)
{
	int rc = -EINVAL;

	rc = ioctl(enclave_fd, NE_START_ENCLAVE, enclave_start_info);
	if (rc < 0) {
		switch (errno) {
		case NE_ERR_NOT_IN_INIT_STATE: {
			printf("Error in start enclave, enclave not in init state\n");

			break;
		}

		case NE_ERR_NO_MEM_REGIONS_ADDED: {
			printf("Error in start enclave, no memory regions have been added\n");

			break;
		}

		case NE_ERR_NO_VCPUS_ADDED: {
			printf("Error in start enclave, no vCPUs have been added\n");

			break;
		}

		case NE_ERR_FULL_CORES_NOT_USED: {
			printf("Error in start enclave, enclave has no full cores set\n");

			break;
		}

		case NE_ERR_ENCLAVE_MEM_MIN_SIZE: {
			printf("Error in start enclave, enclave memory is less than min size\n");

			break;
		}

		case NE_ERR_INVALID_FLAG_VALUE: {
			printf("Error in start enclave, provided invalid flag\n");

			break;
		}

		case NE_ERR_INVALID_ENCLAVE_CID: {
			printf("Error in start enclave, provided invalid enclave CID\n");

			break;
		}

		default:
			printf("Error in start enclave [%m]\n");
		}

		return rc;
	}

	return 0;
}

/**
 * ne_start_enclave_check_booted() - Start the enclave and wait for a heartbeat
 *				     from it, on a newly created vsock channel,
 *				     to check it has booted.
 * @enclave_fd: The file descriptor associated with the enclave.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_start_enclave_check_booted(int enclave_fd)
{
	struct sockaddr_vm client_vsock_addr = {};
	int client_vsock_fd = -1;
	socklen_t client_vsock_len = sizeof(client_vsock_addr);
	struct ne_enclave_start_info enclave_start_info = {};
	struct pollfd fds[1] = {};
	int rc = -EINVAL;
	unsigned char recv_buf = 0;
	struct sockaddr_vm server_vsock_addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = NE_IMAGE_LOAD_HEARTBEAT_CID,
		.svm_port = NE_IMAGE_LOAD_HEARTBEAT_PORT,
	};
	int server_vsock_fd = -1;

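	/*
	 * Set up a listening vsock socket on the heartbeat CID / port, so that
	 * the init process inside the enclave can connect and send the
	 * heartbeat value once the enclave has booted.
	 */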
	server_vsock_fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (server_vsock_fd < 0) {
		rc = server_vsock_fd;

		printf("Error in socket [%m]\n");

		return rc;
	}

	rc = bind(server_vsock_fd, (struct sockaddr *)&server_vsock_addr,
		  sizeof(server_vsock_addr));
	if (rc < 0) {
		printf("Error in bind [%m]\n");

		goto out;
	}

	rc = listen(server_vsock_fd, 1);
	if (rc < 0) {
		printf("Error in listen [%m]\n");

		goto out;
	}

	rc = ne_start_enclave(enclave_fd, &enclave_start_info);
	if (rc < 0)
		goto out;

	printf("Enclave started, CID %llu\n", enclave_start_info.enclave_cid);

	fds[0].fd = server_vsock_fd;
	fds[0].events = POLLIN;

	rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
	if (rc < 0) {
		printf("Error in poll [%m]\n");

		goto out;
	}

	if (!rc) {
		printf("Poll timeout, %d seconds elapsed\n", NE_POLL_WAIT_TIME);

		rc = -ETIMEDOUT;

		goto out;
	}

	if ((fds[0].revents & POLLIN) == 0) {
		printf("Poll received value %d\n", fds[0].revents);

		rc = -EINVAL;

		goto out;
	}

	rc = accept(server_vsock_fd, (struct sockaddr *)&client_vsock_addr,
		    &client_vsock_len);
	if (rc < 0) {
		printf("Error in accept [%m]\n");

		goto out;
	}

	client_vsock_fd = rc;

	/*
	 * Read the heartbeat value that the init process in the enclave sends
	 * after vsock connect.
	 */
	rc = read(client_vsock_fd, &recv_buf, sizeof(recv_buf));
	if (rc < 0) {
		printf("Error in read [%m]\n");

		goto out;
	}

	if (rc != sizeof(recv_buf) || recv_buf != NE_IMAGE_LOAD_HEARTBEAT_VALUE) {
		printf("Read %d instead of %d\n", recv_buf,
		       NE_IMAGE_LOAD_HEARTBEAT_VALUE);

		goto out;
	}

	/* Write the heartbeat value back. */
	rc = write(client_vsock_fd, &recv_buf, sizeof(recv_buf));
	if (rc < 0) {
		printf("Error in write [%m]\n");

		goto out;
	}

	rc = 0;

out:
	close(server_vsock_fd);

	return rc;
}

int main(int argc, char *argv[])
{
	int enclave_fd = -1;
	unsigned int i = 0;
	int ne_dev_fd = -1;
	struct ne_user_mem_region ne_user_mem_regions[NE_DEFAULT_NR_MEM_REGIONS] = {};
	unsigned int ne_vcpus[NE_DEFAULT_NR_VCPUS] = {};
	int rc = -EINVAL;
	pthread_t thread_id = 0;
	unsigned long slot_uid = 0;

	if (argc != 2) {
		printf("Usage: %s <path_to_enclave_image>\n", argv[0]);

		exit(EXIT_FAILURE);
	}

	if (strlen(argv[1]) >= PATH_MAX) {
		printf("The length of the path to the enclave image exceeds the max path size\n");

		exit(EXIT_FAILURE);
	}

	ne_dev_fd = open(NE_DEV_NAME, O_RDWR | O_CLOEXEC);
	if (ne_dev_fd < 0) {
		printf("Error in open NE device [%m]\n");

		exit(EXIT_FAILURE);
	}

	printf("Creating enclave slot ...\n");

	rc = ne_create_vm(ne_dev_fd, &slot_uid, &enclave_fd);

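	/*
	 * The NE device fd is not needed anymore; the enclave is managed
	 * through the enclave fd from now on.
	 */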
	close(ne_dev_fd);

	if (rc < 0)
		exit(EXIT_FAILURE);

	printf("Enclave fd %d\n", enclave_fd);

	/*
	 * pthread_create() returns a positive error number on failure,
	 * not -1 with errno set.
	 */
	rc = pthread_create(&thread_id, NULL, ne_poll_enclave_fd, (void *)&enclave_fd);
	if (rc != 0) {
		printf("Error in thread create [%s]\n", strerror(rc));

		close(enclave_fd);

		exit(EXIT_FAILURE);
	}

	for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
		ne_user_mem_regions[i].memory_size = NE_MIN_MEM_REGION_SIZE;

		rc = ne_alloc_user_mem_region(&ne_user_mem_regions[i]);
		if (rc < 0) {
			printf("Error in alloc userspace memory region, iter %d\n", i);

			goto release_enclave_fd;
		}
	}

	rc = ne_load_enclave_image(enclave_fd, ne_user_mem_regions, argv[1]);
	if (rc < 0)
		goto release_enclave_fd;

	for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
		rc = ne_set_user_mem_region(enclave_fd, ne_user_mem_regions[i]);
		if (rc < 0) {
			printf("Error in set memory region, iter %d\n", i);

			goto release_enclave_fd;
		}
	}

	printf("Enclave memory regions were added\n");

	for (i = 0; i < NE_DEFAULT_NR_VCPUS; i++) {
		/*
		 * The vCPU is chosen from the enclave vCPU pool, if the value
		 * of the vcpu_id is 0.
		 */
		ne_vcpus[i] = 0;
		rc = ne_add_vcpu(enclave_fd, &ne_vcpus[i]);
		if (rc < 0) {
			printf("Error in add vcpu, iter %d\n", i);

			goto release_enclave_fd;
		}

		printf("Added vCPU %d to the enclave\n", ne_vcpus[i]);
	}

	printf("Enclave vCPUs were added\n");

	rc = ne_start_enclave_check_booted(enclave_fd);
	if (rc < 0) {
		printf("Error in the enclave start / image loading heartbeat logic [rc=%d]\n", rc);

		goto release_enclave_fd;
	}

	printf("Entering sleep for %d seconds ...\n", NE_SLEEP_TIME);

	sleep(NE_SLEEP_TIME);

	close(enclave_fd);

	ne_free_mem_regions(ne_user_mem_regions);

	exit(EXIT_SUCCESS);

release_enclave_fd:
	close(enclave_fd);
	ne_free_mem_regions(ne_user_mem_regions);

	exit(EXIT_FAILURE);
}