xref: /OK3568_Linux_fs/kernel/arch/x86/kvm/svm/sev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Kernel-based Virtual Machine driver for Linux
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * AMD SVM-SEV support
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/kvm_types.h>
11*4882a593Smuzhiyun #include <linux/kvm_host.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/highmem.h>
14*4882a593Smuzhiyun #include <linux/psp-sev.h>
15*4882a593Smuzhiyun #include <linux/pagemap.h>
16*4882a593Smuzhiyun #include <linux/swap.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "x86.h"
19*4882a593Smuzhiyun #include "svm.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun static int sev_flush_asids(void);
22*4882a593Smuzhiyun static DECLARE_RWSEM(sev_deactivate_lock);
23*4882a593Smuzhiyun static DEFINE_MUTEX(sev_bitmap_lock);
24*4882a593Smuzhiyun unsigned int max_sev_asid;
25*4882a593Smuzhiyun static unsigned int min_sev_asid;
26*4882a593Smuzhiyun static unsigned long *sev_asid_bitmap;
27*4882a593Smuzhiyun static unsigned long *sev_reclaim_asid_bitmap;
28*4882a593Smuzhiyun #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun struct enc_region {
31*4882a593Smuzhiyun 	struct list_head list;
32*4882a593Smuzhiyun 	unsigned long npages;
33*4882a593Smuzhiyun 	struct page **pages;
34*4882a593Smuzhiyun 	unsigned long uaddr;
35*4882a593Smuzhiyun 	unsigned long size;
36*4882a593Smuzhiyun };
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static int sev_flush_asids(void)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun 	int ret, error = 0;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	/*
43*4882a593Smuzhiyun 	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
44*4882a593Smuzhiyun 	 * so it must be guarded.
45*4882a593Smuzhiyun 	 */
46*4882a593Smuzhiyun 	down_write(&sev_deactivate_lock);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	wbinvd_on_all_cpus();
49*4882a593Smuzhiyun 	ret = sev_guest_df_flush(&error);
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	up_write(&sev_deactivate_lock);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	if (ret)
54*4882a593Smuzhiyun 		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	return ret;
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun /* Must be called with the sev_bitmap_lock held */
60*4882a593Smuzhiyun static bool __sev_recycle_asids(void)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun 	int pos;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	/* Check if there are any ASIDs to reclaim before performing a flush */
65*4882a593Smuzhiyun 	pos = find_next_bit(sev_reclaim_asid_bitmap,
66*4882a593Smuzhiyun 			    max_sev_asid, min_sev_asid - 1);
67*4882a593Smuzhiyun 	if (pos >= max_sev_asid)
68*4882a593Smuzhiyun 		return false;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	if (sev_flush_asids())
71*4882a593Smuzhiyun 		return false;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
74*4882a593Smuzhiyun 		   max_sev_asid);
75*4882a593Smuzhiyun 	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	return true;
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun 
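/*
 * Allocate an unused SEV ASID from the bitmap; if none is free, try once to
 * recycle ASIDs released by destroyed guests (which requires a WBINVD plus
 * DF_FLUSH). Returns the 1-based ASID on success or -EBUSY if none is left.
 */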
80*4882a593Smuzhiyun static int sev_asid_new(void)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	bool retry = true;
83*4882a593Smuzhiyun 	int pos;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	mutex_lock(&sev_bitmap_lock);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	/*
88*4882a593Smuzhiyun 	 * An SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
89*4882a593Smuzhiyun 	 */
90*4882a593Smuzhiyun again:
91*4882a593Smuzhiyun 	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
92*4882a593Smuzhiyun 	if (pos >= max_sev_asid) {
93*4882a593Smuzhiyun 		if (retry && __sev_recycle_asids()) {
94*4882a593Smuzhiyun 			retry = false;
95*4882a593Smuzhiyun 			goto again;
96*4882a593Smuzhiyun 		}
97*4882a593Smuzhiyun 		mutex_unlock(&sev_bitmap_lock);
98*4882a593Smuzhiyun 		return -EBUSY;
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	__set_bit(pos, sev_asid_bitmap);
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	mutex_unlock(&sev_bitmap_lock);
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	return pos + 1;
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun static int sev_get_asid(struct kvm *kvm)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return sev->asid;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun static void sev_asid_free(int asid)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	struct svm_cpu_data *sd;
118*4882a593Smuzhiyun 	int cpu, pos;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	mutex_lock(&sev_bitmap_lock);
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	pos = asid - 1;
123*4882a593Smuzhiyun 	__set_bit(pos, sev_reclaim_asid_bitmap);
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
126*4882a593Smuzhiyun 		sd = per_cpu(svm_data, cpu);
127*4882a593Smuzhiyun 		sd->sev_vmcbs[asid] = NULL;
128*4882a593Smuzhiyun 	}
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	mutex_unlock(&sev_bitmap_lock);
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun static void sev_decommission(unsigned int handle)
134*4882a593Smuzhiyun {
135*4882a593Smuzhiyun 	struct sev_data_decommission *decommission;
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	if (!handle)
138*4882a593Smuzhiyun 		return;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
141*4882a593Smuzhiyun 	if (!decommission)
142*4882a593Smuzhiyun 		return;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	decommission->handle = handle;
145*4882a593Smuzhiyun 	sev_guest_decommission(decommission, NULL);
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	kfree(decommission);
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	struct sev_data_deactivate *data;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	if (!handle)
155*4882a593Smuzhiyun 		return;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL);
158*4882a593Smuzhiyun 	if (!data)
159*4882a593Smuzhiyun 		return;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	/* deactivate handle */
162*4882a593Smuzhiyun 	data->handle = handle;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
165*4882a593Smuzhiyun 	down_read(&sev_deactivate_lock);
166*4882a593Smuzhiyun 	sev_guest_deactivate(data, NULL);
167*4882a593Smuzhiyun 	up_read(&sev_deactivate_lock);
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	kfree(data);
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	sev_decommission(handle);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
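/*
 * KVM_SEV_INIT: allocate an ASID and initialize the SEV platform. Must be
 * issued before any vCPUs are created and only once per VM.
 */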
174*4882a593Smuzhiyun static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
177*4882a593Smuzhiyun 	int asid, ret;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	if (kvm->created_vcpus)
180*4882a593Smuzhiyun 		return -EINVAL;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	ret = -EBUSY;
183*4882a593Smuzhiyun 	if (unlikely(sev->active))
184*4882a593Smuzhiyun 		return ret;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	asid = sev_asid_new();
187*4882a593Smuzhiyun 	if (asid < 0)
188*4882a593Smuzhiyun 		return ret;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	ret = sev_platform_init(&argp->error);
191*4882a593Smuzhiyun 	if (ret)
192*4882a593Smuzhiyun 		goto e_free;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	sev->active = true;
195*4882a593Smuzhiyun 	sev->asid = asid;
196*4882a593Smuzhiyun 	INIT_LIST_HEAD(&sev->regions_list);
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	return 0;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun e_free:
201*4882a593Smuzhiyun 	sev_asid_free(asid);
202*4882a593Smuzhiyun 	return ret;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun 	struct sev_data_activate *data;
208*4882a593Smuzhiyun 	int asid = sev_get_asid(kvm);
209*4882a593Smuzhiyun 	int ret;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
212*4882a593Smuzhiyun 	if (!data)
213*4882a593Smuzhiyun 		return -ENOMEM;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	/* activate ASID on the given handle */
216*4882a593Smuzhiyun 	data->handle = handle;
217*4882a593Smuzhiyun 	data->asid   = asid;
218*4882a593Smuzhiyun 	ret = sev_guest_activate(data, error);
219*4882a593Smuzhiyun 	kfree(data);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	return ret;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun static int __sev_issue_cmd(int fd, int id, void *data, int *error)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun 	struct fd f;
227*4882a593Smuzhiyun 	int ret;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	f = fdget(fd);
230*4882a593Smuzhiyun 	if (!f.file)
231*4882a593Smuzhiyun 		return -EBADF;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	ret = sev_issue_cmd_external_user(f.file, id, data, error);
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	fdput(f);
236*4882a593Smuzhiyun 	return ret;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	return __sev_issue_cmd(sev->fd, id, data, error);
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun 
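/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context via
 * SEV_CMD_LAUNCH_START, bind the VM's ASID to the returned handle and copy
 * the handle back to userspace.
 */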
246*4882a593Smuzhiyun static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
249*4882a593Smuzhiyun 	struct sev_data_launch_start *start;
250*4882a593Smuzhiyun 	struct kvm_sev_launch_start params;
251*4882a593Smuzhiyun 	void *dh_blob, *session_blob;
252*4882a593Smuzhiyun 	int *error = &argp->error;
253*4882a593Smuzhiyun 	int ret;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	if (!sev_guest(kvm))
256*4882a593Smuzhiyun 		return -ENOTTY;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
259*4882a593Smuzhiyun 		return -EFAULT;
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
262*4882a593Smuzhiyun 	if (!start)
263*4882a593Smuzhiyun 		return -ENOMEM;
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	dh_blob = NULL;
266*4882a593Smuzhiyun 	if (params.dh_uaddr) {
267*4882a593Smuzhiyun 		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
268*4882a593Smuzhiyun 		if (IS_ERR(dh_blob)) {
269*4882a593Smuzhiyun 			ret = PTR_ERR(dh_blob);
270*4882a593Smuzhiyun 			goto e_free;
271*4882a593Smuzhiyun 		}
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 		start->dh_cert_address = __sme_set(__pa(dh_blob));
274*4882a593Smuzhiyun 		start->dh_cert_len = params.dh_len;
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	session_blob = NULL;
278*4882a593Smuzhiyun 	if (params.session_uaddr) {
279*4882a593Smuzhiyun 		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
280*4882a593Smuzhiyun 		if (IS_ERR(session_blob)) {
281*4882a593Smuzhiyun 			ret = PTR_ERR(session_blob);
282*4882a593Smuzhiyun 			goto e_free_dh;
283*4882a593Smuzhiyun 		}
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 		start->session_address = __sme_set(__pa(session_blob));
286*4882a593Smuzhiyun 		start->session_len = params.session_len;
287*4882a593Smuzhiyun 	}
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	start->handle = params.handle;
290*4882a593Smuzhiyun 	start->policy = params.policy;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	/* create memory encryption context */
293*4882a593Smuzhiyun 	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
294*4882a593Smuzhiyun 	if (ret)
295*4882a593Smuzhiyun 		goto e_free_session;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	/* Bind ASID to this guest */
298*4882a593Smuzhiyun 	ret = sev_bind_asid(kvm, start->handle, error);
299*4882a593Smuzhiyun 	if (ret) {
300*4882a593Smuzhiyun 		sev_decommission(start->handle);
301*4882a593Smuzhiyun 		goto e_free_session;
302*4882a593Smuzhiyun 	}
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	/* return handle to userspace */
305*4882a593Smuzhiyun 	params.handle = start->handle;
306*4882a593Smuzhiyun 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
307*4882a593Smuzhiyun 		sev_unbind_asid(kvm, start->handle);
308*4882a593Smuzhiyun 		ret = -EFAULT;
309*4882a593Smuzhiyun 		goto e_free_session;
310*4882a593Smuzhiyun 	}
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	sev->handle = start->handle;
313*4882a593Smuzhiyun 	sev->fd = argp->sev_fd;
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun e_free_session:
316*4882a593Smuzhiyun 	kfree(session_blob);
317*4882a593Smuzhiyun e_free_dh:
318*4882a593Smuzhiyun 	kfree(dh_blob);
319*4882a593Smuzhiyun e_free:
320*4882a593Smuzhiyun 	kfree(start);
321*4882a593Smuzhiyun 	return ret;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
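/*
 * Pin the user pages backing [uaddr, uaddr + ulen) and charge them against
 * RLIMIT_MEMLOCK. On success *n holds the number of pinned pages; the caller
 * must release them with sev_unpin_memory().
 */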
324*4882a593Smuzhiyun static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
325*4882a593Smuzhiyun 				    unsigned long ulen, unsigned long *n,
326*4882a593Smuzhiyun 				    int write)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
329*4882a593Smuzhiyun 	unsigned long npages, size;
330*4882a593Smuzhiyun 	int npinned;
331*4882a593Smuzhiyun 	unsigned long locked, lock_limit;
332*4882a593Smuzhiyun 	struct page **pages;
333*4882a593Smuzhiyun 	unsigned long first, last;
334*4882a593Smuzhiyun 	int ret;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	lockdep_assert_held(&kvm->lock);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	if (ulen == 0 || uaddr + ulen < uaddr)
339*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	/* Calculate number of pages. */
342*4882a593Smuzhiyun 	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
343*4882a593Smuzhiyun 	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
344*4882a593Smuzhiyun 	npages = (last - first + 1);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	locked = sev->pages_locked + npages;
347*4882a593Smuzhiyun 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
348*4882a593Smuzhiyun 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
349*4882a593Smuzhiyun 		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
350*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
351*4882a593Smuzhiyun 	}
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	if (WARN_ON_ONCE(npages > INT_MAX))
354*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	/* Avoid using vmalloc for smaller buffers. */
357*4882a593Smuzhiyun 	size = npages * sizeof(struct page *);
358*4882a593Smuzhiyun 	if (size > PAGE_SIZE)
359*4882a593Smuzhiyun 		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
360*4882a593Smuzhiyun 	else
361*4882a593Smuzhiyun 		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	if (!pages)
364*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	/* Pin the user virtual address. */
367*4882a593Smuzhiyun 	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
368*4882a593Smuzhiyun 	if (npinned != npages) {
369*4882a593Smuzhiyun 		pr_err("SEV: Failure locking %lu pages.\n", npages);
370*4882a593Smuzhiyun 		ret = -ENOMEM;
371*4882a593Smuzhiyun 		goto err;
372*4882a593Smuzhiyun 	}
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	*n = npages;
375*4882a593Smuzhiyun 	sev->pages_locked = locked;
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 	return pages;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun err:
380*4882a593Smuzhiyun 	if (npinned > 0)
381*4882a593Smuzhiyun 		unpin_user_pages(pages, npinned);
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	kvfree(pages);
384*4882a593Smuzhiyun 	return ERR_PTR(ret);
385*4882a593Smuzhiyun }
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
388*4882a593Smuzhiyun 			     unsigned long npages)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	unpin_user_pages(pages, npages);
393*4882a593Smuzhiyun 	kvfree(pages);
394*4882a593Smuzhiyun 	sev->pages_locked -= npages;
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun static void sev_clflush_pages(struct page *pages[], unsigned long npages)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun 	uint8_t *page_virtual;
400*4882a593Smuzhiyun 	unsigned long i;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
403*4882a593Smuzhiyun 	    pages == NULL)
404*4882a593Smuzhiyun 		return;
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	for (i = 0; i < npages; i++) {
407*4882a593Smuzhiyun 		page_virtual = kmap_atomic(pages[i]);
408*4882a593Smuzhiyun 		clflush_cache_range(page_virtual, PAGE_SIZE);
409*4882a593Smuzhiyun 		kunmap_atomic(page_virtual);
410*4882a593Smuzhiyun 	}
411*4882a593Smuzhiyun }
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun static unsigned long get_num_contig_pages(unsigned long idx,
414*4882a593Smuzhiyun 				struct page **inpages, unsigned long npages)
415*4882a593Smuzhiyun {
416*4882a593Smuzhiyun 	unsigned long paddr, next_paddr;
417*4882a593Smuzhiyun 	unsigned long i = idx + 1, pages = 1;
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	/* find the number of contiguous pages starting from idx */
420*4882a593Smuzhiyun 	paddr = __sme_page_pa(inpages[idx]);
421*4882a593Smuzhiyun 	while (i < npages) {
422*4882a593Smuzhiyun 		next_paddr = __sme_page_pa(inpages[i++]);
423*4882a593Smuzhiyun 		if ((paddr + PAGE_SIZE) == next_paddr) {
424*4882a593Smuzhiyun 			pages++;
425*4882a593Smuzhiyun 			paddr = next_paddr;
426*4882a593Smuzhiyun 			continue;
427*4882a593Smuzhiyun 		}
428*4882a593Smuzhiyun 		break;
429*4882a593Smuzhiyun 	}
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	return pages;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun 
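/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the userspace range, flush it from the
 * caches and encrypt it in place, one physically contiguous run of pages per
 * firmware command.
 */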
434*4882a593Smuzhiyun static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun 	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
437*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
438*4882a593Smuzhiyun 	struct kvm_sev_launch_update_data params;
439*4882a593Smuzhiyun 	struct sev_data_launch_update_data *data;
440*4882a593Smuzhiyun 	struct page **inpages;
441*4882a593Smuzhiyun 	int ret;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	if (!sev_guest(kvm))
444*4882a593Smuzhiyun 		return -ENOTTY;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
447*4882a593Smuzhiyun 		return -EFAULT;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
450*4882a593Smuzhiyun 	if (!data)
451*4882a593Smuzhiyun 		return -ENOMEM;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	vaddr = params.uaddr;
454*4882a593Smuzhiyun 	size = params.len;
455*4882a593Smuzhiyun 	vaddr_end = vaddr + size;
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun 	/* Lock the user memory. */
458*4882a593Smuzhiyun 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
459*4882a593Smuzhiyun 	if (IS_ERR(inpages)) {
460*4882a593Smuzhiyun 		ret = PTR_ERR(inpages);
461*4882a593Smuzhiyun 		goto e_free;
462*4882a593Smuzhiyun 	}
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	/*
465*4882a593Smuzhiyun 	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
466*4882a593Smuzhiyun 	 * place; the cache may contain the data that was written unencrypted.
467*4882a593Smuzhiyun 	 */
468*4882a593Smuzhiyun 	sev_clflush_pages(inpages, npages);
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
471*4882a593Smuzhiyun 		int offset, len;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 		/*
474*4882a593Smuzhiyun 		 * If the user buffer is not page-aligned, calculate the offset
475*4882a593Smuzhiyun 		 * within the page.
476*4882a593Smuzhiyun 		 */
477*4882a593Smuzhiyun 		offset = vaddr & (PAGE_SIZE - 1);
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 		/* Calculate the number of pages that can be encrypted in one go. */
480*4882a593Smuzhiyun 		pages = get_num_contig_pages(i, inpages, npages);
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 		data->handle = sev->handle;
485*4882a593Smuzhiyun 		data->len = len;
486*4882a593Smuzhiyun 		data->address = __sme_page_pa(inpages[i]) + offset;
487*4882a593Smuzhiyun 		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
488*4882a593Smuzhiyun 		if (ret)
489*4882a593Smuzhiyun 			goto e_unpin;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 		size -= len;
492*4882a593Smuzhiyun 		next_vaddr = vaddr + len;
493*4882a593Smuzhiyun 	}
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun e_unpin:
496*4882a593Smuzhiyun 	/* content of memory is updated, mark pages dirty */
497*4882a593Smuzhiyun 	for (i = 0; i < npages; i++) {
498*4882a593Smuzhiyun 		set_page_dirty_lock(inpages[i]);
499*4882a593Smuzhiyun 		mark_page_accessed(inpages[i]);
500*4882a593Smuzhiyun 	}
501*4882a593Smuzhiyun 	/* unlock the user pages */
502*4882a593Smuzhiyun 	sev_unpin_memory(kvm, inpages, npages);
503*4882a593Smuzhiyun e_free:
504*4882a593Smuzhiyun 	kfree(data);
505*4882a593Smuzhiyun 	return ret;
506*4882a593Smuzhiyun }
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun 	void __user *measure = (void __user *)(uintptr_t)argp->data;
511*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
512*4882a593Smuzhiyun 	struct sev_data_launch_measure *data;
513*4882a593Smuzhiyun 	struct kvm_sev_launch_measure params;
514*4882a593Smuzhiyun 	void __user *p = NULL;
515*4882a593Smuzhiyun 	void *blob = NULL;
516*4882a593Smuzhiyun 	int ret;
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 	if (!sev_guest(kvm))
519*4882a593Smuzhiyun 		return -ENOTTY;
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	if (copy_from_user(&params, measure, sizeof(params)))
522*4882a593Smuzhiyun 		return -EFAULT;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
525*4882a593Smuzhiyun 	if (!data)
526*4882a593Smuzhiyun 		return -ENOMEM;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	/* User wants to query the blob length */
529*4882a593Smuzhiyun 	if (!params.len)
530*4882a593Smuzhiyun 		goto cmd;
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	p = (void __user *)(uintptr_t)params.uaddr;
533*4882a593Smuzhiyun 	if (p) {
534*4882a593Smuzhiyun 		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
535*4882a593Smuzhiyun 			ret = -EINVAL;
536*4882a593Smuzhiyun 			goto e_free;
537*4882a593Smuzhiyun 		}
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 		ret = -ENOMEM;
540*4882a593Smuzhiyun 		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
541*4882a593Smuzhiyun 		if (!blob)
542*4882a593Smuzhiyun 			goto e_free;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 		data->address = __psp_pa(blob);
545*4882a593Smuzhiyun 		data->len = params.len;
546*4882a593Smuzhiyun 	}
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun cmd:
549*4882a593Smuzhiyun 	data->handle = sev->handle;
550*4882a593Smuzhiyun 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
551*4882a593Smuzhiyun 
552*4882a593Smuzhiyun 	/*
553*4882a593Smuzhiyun 	 * If we were only querying the blob length, the FW has already returned the expected data.
554*4882a593Smuzhiyun 	 */
555*4882a593Smuzhiyun 	if (!params.len)
556*4882a593Smuzhiyun 		goto done;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	if (ret)
559*4882a593Smuzhiyun 		goto e_free_blob;
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	if (blob) {
562*4882a593Smuzhiyun 		if (copy_to_user(p, blob, params.len))
563*4882a593Smuzhiyun 			ret = -EFAULT;
564*4882a593Smuzhiyun 	}
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun done:
567*4882a593Smuzhiyun 	params.len = data->len;
568*4882a593Smuzhiyun 	if (copy_to_user(measure, &params, sizeof(params)))
569*4882a593Smuzhiyun 		ret = -EFAULT;
570*4882a593Smuzhiyun e_free_blob:
571*4882a593Smuzhiyun 	kfree(blob);
572*4882a593Smuzhiyun e_free:
573*4882a593Smuzhiyun 	kfree(data);
574*4882a593Smuzhiyun 	return ret;
575*4882a593Smuzhiyun }
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
578*4882a593Smuzhiyun {
579*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
580*4882a593Smuzhiyun 	struct sev_data_launch_finish *data;
581*4882a593Smuzhiyun 	int ret;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	if (!sev_guest(kvm))
584*4882a593Smuzhiyun 		return -ENOTTY;
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
587*4882a593Smuzhiyun 	if (!data)
588*4882a593Smuzhiyun 		return -ENOMEM;
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun 	data->handle = sev->handle;
591*4882a593Smuzhiyun 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	kfree(data);
594*4882a593Smuzhiyun 	return ret;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
598*4882a593Smuzhiyun {
599*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
600*4882a593Smuzhiyun 	struct kvm_sev_guest_status params;
601*4882a593Smuzhiyun 	struct sev_data_guest_status *data;
602*4882a593Smuzhiyun 	int ret;
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun 	if (!sev_guest(kvm))
605*4882a593Smuzhiyun 		return -ENOTTY;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
608*4882a593Smuzhiyun 	if (!data)
609*4882a593Smuzhiyun 		return -ENOMEM;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	data->handle = sev->handle;
612*4882a593Smuzhiyun 	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
613*4882a593Smuzhiyun 	if (ret)
614*4882a593Smuzhiyun 		goto e_free;
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	params.policy = data->policy;
617*4882a593Smuzhiyun 	params.state = data->state;
618*4882a593Smuzhiyun 	params.handle = data->handle;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
621*4882a593Smuzhiyun 		ret = -EFAULT;
622*4882a593Smuzhiyun e_free:
623*4882a593Smuzhiyun 	kfree(data);
624*4882a593Smuzhiyun 	return ret;
625*4882a593Smuzhiyun }
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
628*4882a593Smuzhiyun 			       unsigned long dst, int size,
629*4882a593Smuzhiyun 			       int *error, bool enc)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
632*4882a593Smuzhiyun 	struct sev_data_dbg *data;
633*4882a593Smuzhiyun 	int ret;
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
636*4882a593Smuzhiyun 	if (!data)
637*4882a593Smuzhiyun 		return -ENOMEM;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	data->handle = sev->handle;
640*4882a593Smuzhiyun 	data->dst_addr = dst;
641*4882a593Smuzhiyun 	data->src_addr = src;
642*4882a593Smuzhiyun 	data->len = size;
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 	ret = sev_issue_cmd(kvm,
645*4882a593Smuzhiyun 			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
646*4882a593Smuzhiyun 			    data, error);
647*4882a593Smuzhiyun 	kfree(data);
648*4882a593Smuzhiyun 	return ret;
649*4882a593Smuzhiyun }
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
652*4882a593Smuzhiyun 			     unsigned long dst_paddr, int sz, int *err)
653*4882a593Smuzhiyun {
654*4882a593Smuzhiyun 	int offset;
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	/*
657*4882a593Smuzhiyun 	 * It's safe to read more than we were asked for; the caller must ensure
658*4882a593Smuzhiyun 	 * that the destination has enough space.
659*4882a593Smuzhiyun 	 */
660*4882a593Smuzhiyun 	offset = src_paddr & 15;
661*4882a593Smuzhiyun 	src_paddr = round_down(src_paddr, 16);
662*4882a593Smuzhiyun 	sz = round_up(sz + offset, 16);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
668*4882a593Smuzhiyun 				  unsigned long __user dst_uaddr,
669*4882a593Smuzhiyun 				  unsigned long dst_paddr,
670*4882a593Smuzhiyun 				  int size, int *err)
671*4882a593Smuzhiyun {
672*4882a593Smuzhiyun 	struct page *tpage = NULL;
673*4882a593Smuzhiyun 	int ret, offset;
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 	/* If the inputs are not 16-byte aligned then use an intermediate buffer */
676*4882a593Smuzhiyun 	if (!IS_ALIGNED(dst_paddr, 16) ||
677*4882a593Smuzhiyun 	    !IS_ALIGNED(paddr,     16) ||
678*4882a593Smuzhiyun 	    !IS_ALIGNED(size,      16)) {
679*4882a593Smuzhiyun 		tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
680*4882a593Smuzhiyun 		if (!tpage)
681*4882a593Smuzhiyun 			return -ENOMEM;
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun 		dst_paddr = __sme_page_pa(tpage);
684*4882a593Smuzhiyun 	}
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
687*4882a593Smuzhiyun 	if (ret)
688*4882a593Smuzhiyun 		goto e_free;
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	if (tpage) {
691*4882a593Smuzhiyun 		offset = paddr & 15;
692*4882a593Smuzhiyun 		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
693*4882a593Smuzhiyun 				 page_address(tpage) + offset, size))
694*4882a593Smuzhiyun 			ret = -EFAULT;
695*4882a593Smuzhiyun 	}
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun e_free:
698*4882a593Smuzhiyun 	if (tpage)
699*4882a593Smuzhiyun 		__free_page(tpage);
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	return ret;
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
705*4882a593Smuzhiyun 				  unsigned long __user vaddr,
706*4882a593Smuzhiyun 				  unsigned long dst_paddr,
707*4882a593Smuzhiyun 				  unsigned long __user dst_vaddr,
708*4882a593Smuzhiyun 				  int size, int *error)
709*4882a593Smuzhiyun {
710*4882a593Smuzhiyun 	struct page *src_tpage = NULL;
711*4882a593Smuzhiyun 	struct page *dst_tpage = NULL;
712*4882a593Smuzhiyun 	int ret, len = size;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	/* If the source buffer is not 16-byte aligned then use an intermediate buffer */
715*4882a593Smuzhiyun 	if (!IS_ALIGNED(vaddr, 16)) {
716*4882a593Smuzhiyun 		src_tpage = alloc_page(GFP_KERNEL);
717*4882a593Smuzhiyun 		if (!src_tpage)
718*4882a593Smuzhiyun 			return -ENOMEM;
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 		if (copy_from_user(page_address(src_tpage),
721*4882a593Smuzhiyun 				(void __user *)(uintptr_t)vaddr, size)) {
722*4882a593Smuzhiyun 			__free_page(src_tpage);
723*4882a593Smuzhiyun 			return -EFAULT;
724*4882a593Smuzhiyun 		}
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 		paddr = __sme_page_pa(src_tpage);
727*4882a593Smuzhiyun 	}
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	/*
730*4882a593Smuzhiyun 	 *  If destination buffer or length is not aligned then do read-modify-write:
731*4882a593Smuzhiyun 	 *   - decrypt destination in an intermediate buffer
732*4882a593Smuzhiyun 	 *   - copy the source buffer in an intermediate buffer
733*4882a593Smuzhiyun 	 *   - use the intermediate buffer as source buffer
734*4882a593Smuzhiyun 	 */
735*4882a593Smuzhiyun 	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
736*4882a593Smuzhiyun 		int dst_offset;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 		dst_tpage = alloc_page(GFP_KERNEL);
739*4882a593Smuzhiyun 		if (!dst_tpage) {
740*4882a593Smuzhiyun 			ret = -ENOMEM;
741*4882a593Smuzhiyun 			goto e_free;
742*4882a593Smuzhiyun 		}
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun 		ret = __sev_dbg_decrypt(kvm, dst_paddr,
745*4882a593Smuzhiyun 					__sme_page_pa(dst_tpage), size, error);
746*4882a593Smuzhiyun 		if (ret)
747*4882a593Smuzhiyun 			goto e_free;
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 		/*
750*4882a593Smuzhiyun 		 *  If the source is a kernel buffer then use memcpy(), otherwise
751*4882a593Smuzhiyun 		 *  use copy_from_user().
752*4882a593Smuzhiyun 		 */
753*4882a593Smuzhiyun 		dst_offset = dst_paddr & 15;
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 		if (src_tpage)
756*4882a593Smuzhiyun 			memcpy(page_address(dst_tpage) + dst_offset,
757*4882a593Smuzhiyun 			       page_address(src_tpage), size);
758*4882a593Smuzhiyun 		else {
759*4882a593Smuzhiyun 			if (copy_from_user(page_address(dst_tpage) + dst_offset,
760*4882a593Smuzhiyun 					   (void __user *)(uintptr_t)vaddr, size)) {
761*4882a593Smuzhiyun 				ret = -EFAULT;
762*4882a593Smuzhiyun 				goto e_free;
763*4882a593Smuzhiyun 			}
764*4882a593Smuzhiyun 		}
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 		paddr = __sme_page_pa(dst_tpage);
767*4882a593Smuzhiyun 		dst_paddr = round_down(dst_paddr, 16);
768*4882a593Smuzhiyun 		len = round_up(size, 16);
769*4882a593Smuzhiyun 	}
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun e_free:
774*4882a593Smuzhiyun 	if (src_tpage)
775*4882a593Smuzhiyun 		__free_page(src_tpage);
776*4882a593Smuzhiyun 	if (dst_tpage)
777*4882a593Smuzhiyun 		__free_page(dst_tpage);
778*4882a593Smuzhiyun 	return ret;
779*4882a593Smuzhiyun }
780*4882a593Smuzhiyun 
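/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT: walk the
 * source range one page at a time, pinning the source and destination pages
 * and issuing the corresponding DBG firmware command for each chunk.
 */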
781*4882a593Smuzhiyun static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun 	unsigned long vaddr, vaddr_end, next_vaddr;
784*4882a593Smuzhiyun 	unsigned long dst_vaddr;
785*4882a593Smuzhiyun 	struct page **src_p, **dst_p;
786*4882a593Smuzhiyun 	struct kvm_sev_dbg debug;
787*4882a593Smuzhiyun 	unsigned long n;
788*4882a593Smuzhiyun 	unsigned int size;
789*4882a593Smuzhiyun 	int ret;
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun 	if (!sev_guest(kvm))
792*4882a593Smuzhiyun 		return -ENOTTY;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
795*4882a593Smuzhiyun 		return -EFAULT;
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
798*4882a593Smuzhiyun 		return -EINVAL;
799*4882a593Smuzhiyun 	if (!debug.dst_uaddr)
800*4882a593Smuzhiyun 		return -EINVAL;
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun 	vaddr = debug.src_uaddr;
803*4882a593Smuzhiyun 	size = debug.len;
804*4882a593Smuzhiyun 	vaddr_end = vaddr + size;
805*4882a593Smuzhiyun 	dst_vaddr = debug.dst_uaddr;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
808*4882a593Smuzhiyun 		int len, s_off, d_off;
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 		/* lock userspace source and destination page */
811*4882a593Smuzhiyun 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
812*4882a593Smuzhiyun 		if (IS_ERR(src_p))
813*4882a593Smuzhiyun 			return PTR_ERR(src_p);
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
816*4882a593Smuzhiyun 		if (IS_ERR(dst_p)) {
817*4882a593Smuzhiyun 			sev_unpin_memory(kvm, src_p, n);
818*4882a593Smuzhiyun 			return PTR_ERR(dst_p);
819*4882a593Smuzhiyun 		}
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 		/*
822*4882a593Smuzhiyun 		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
823*4882a593Smuzhiyun 		 * the pages; flush the destination too so that future accesses do not
824*4882a593Smuzhiyun 		 * see stale data.
825*4882a593Smuzhiyun 		 */
826*4882a593Smuzhiyun 		sev_clflush_pages(src_p, 1);
827*4882a593Smuzhiyun 		sev_clflush_pages(dst_p, 1);
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 		/*
830*4882a593Smuzhiyun 		 * Since the user buffer may not be page-aligned, calculate the
831*4882a593Smuzhiyun 		 * offset within the page.
832*4882a593Smuzhiyun 		 */
833*4882a593Smuzhiyun 		s_off = vaddr & ~PAGE_MASK;
834*4882a593Smuzhiyun 		d_off = dst_vaddr & ~PAGE_MASK;
835*4882a593Smuzhiyun 		len = min_t(size_t, (PAGE_SIZE - s_off), size);
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 		if (dec)
838*4882a593Smuzhiyun 			ret = __sev_dbg_decrypt_user(kvm,
839*4882a593Smuzhiyun 						     __sme_page_pa(src_p[0]) + s_off,
840*4882a593Smuzhiyun 						     dst_vaddr,
841*4882a593Smuzhiyun 						     __sme_page_pa(dst_p[0]) + d_off,
842*4882a593Smuzhiyun 						     len, &argp->error);
843*4882a593Smuzhiyun 		else
844*4882a593Smuzhiyun 			ret = __sev_dbg_encrypt_user(kvm,
845*4882a593Smuzhiyun 						     __sme_page_pa(src_p[0]) + s_off,
846*4882a593Smuzhiyun 						     vaddr,
847*4882a593Smuzhiyun 						     __sme_page_pa(dst_p[0]) + d_off,
848*4882a593Smuzhiyun 						     dst_vaddr,
849*4882a593Smuzhiyun 						     len, &argp->error);
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun 		sev_unpin_memory(kvm, src_p, n);
852*4882a593Smuzhiyun 		sev_unpin_memory(kvm, dst_p, n);
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 		if (ret)
855*4882a593Smuzhiyun 			goto err;
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 		next_vaddr = vaddr + len;
858*4882a593Smuzhiyun 		dst_vaddr = dst_vaddr + len;
859*4882a593Smuzhiyun 		size -= len;
860*4882a593Smuzhiyun 	}
861*4882a593Smuzhiyun err:
862*4882a593Smuzhiyun 	return ret;
863*4882a593Smuzhiyun }
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
866*4882a593Smuzhiyun {
867*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
868*4882a593Smuzhiyun 	struct sev_data_launch_secret *data;
869*4882a593Smuzhiyun 	struct kvm_sev_launch_secret params;
870*4882a593Smuzhiyun 	struct page **pages;
871*4882a593Smuzhiyun 	void *blob, *hdr;
872*4882a593Smuzhiyun 	unsigned long n, i;
873*4882a593Smuzhiyun 	int ret, offset;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	if (!sev_guest(kvm))
876*4882a593Smuzhiyun 		return -ENOTTY;
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
879*4882a593Smuzhiyun 		return -EFAULT;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
882*4882a593Smuzhiyun 	if (IS_ERR(pages))
883*4882a593Smuzhiyun 		return PTR_ERR(pages);
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	/*
886*4882a593Smuzhiyun 	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
887*4882a593Smuzhiyun 	 * place; the cache may contain the data that was written unencrypted.
888*4882a593Smuzhiyun 	 */
889*4882a593Smuzhiyun 	sev_clflush_pages(pages, n);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	/*
892*4882a593Smuzhiyun 	 * The secret must be copied into a contiguous memory region; verify
893*4882a593Smuzhiyun 	 * that the userspace memory pages are contiguous before issuing the command.
894*4882a593Smuzhiyun 	 */
895*4882a593Smuzhiyun 	if (get_num_contig_pages(0, pages, n) != n) {
896*4882a593Smuzhiyun 		ret = -EINVAL;
897*4882a593Smuzhiyun 		goto e_unpin_memory;
898*4882a593Smuzhiyun 	}
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	ret = -ENOMEM;
901*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
902*4882a593Smuzhiyun 	if (!data)
903*4882a593Smuzhiyun 		goto e_unpin_memory;
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
906*4882a593Smuzhiyun 	data->guest_address = __sme_page_pa(pages[0]) + offset;
907*4882a593Smuzhiyun 	data->guest_len = params.guest_len;
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
910*4882a593Smuzhiyun 	if (IS_ERR(blob)) {
911*4882a593Smuzhiyun 		ret = PTR_ERR(blob);
912*4882a593Smuzhiyun 		goto e_free;
913*4882a593Smuzhiyun 	}
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	data->trans_address = __psp_pa(blob);
916*4882a593Smuzhiyun 	data->trans_len = params.trans_len;
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
919*4882a593Smuzhiyun 	if (IS_ERR(hdr)) {
920*4882a593Smuzhiyun 		ret = PTR_ERR(hdr);
921*4882a593Smuzhiyun 		goto e_free_blob;
922*4882a593Smuzhiyun 	}
923*4882a593Smuzhiyun 	data->hdr_address = __psp_pa(hdr);
924*4882a593Smuzhiyun 	data->hdr_len = params.hdr_len;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	data->handle = sev->handle;
927*4882a593Smuzhiyun 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	kfree(hdr);
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun e_free_blob:
932*4882a593Smuzhiyun 	kfree(blob);
933*4882a593Smuzhiyun e_free:
934*4882a593Smuzhiyun 	kfree(data);
935*4882a593Smuzhiyun e_unpin_memory:
936*4882a593Smuzhiyun 	/* content of memory is updated, mark pages dirty */
937*4882a593Smuzhiyun 	for (i = 0; i < n; i++) {
938*4882a593Smuzhiyun 		set_page_dirty_lock(pages[i]);
939*4882a593Smuzhiyun 		mark_page_accessed(pages[i]);
940*4882a593Smuzhiyun 	}
941*4882a593Smuzhiyun 	sev_unpin_memory(kvm, pages, n);
942*4882a593Smuzhiyun 	return ret;
943*4882a593Smuzhiyun }
944*4882a593Smuzhiyun 
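/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl: copy the command
 * from userspace, route it to the matching SEV handler under kvm->lock and
 * copy the (possibly updated) command structure back to userspace.
 */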
945*4882a593Smuzhiyun int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
946*4882a593Smuzhiyun {
947*4882a593Smuzhiyun 	struct kvm_sev_cmd sev_cmd;
948*4882a593Smuzhiyun 	int r;
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	if (!svm_sev_enabled())
951*4882a593Smuzhiyun 		return -ENOTTY;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	if (!argp)
954*4882a593Smuzhiyun 		return 0;
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
957*4882a593Smuzhiyun 		return -EFAULT;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	switch (sev_cmd.id) {
962*4882a593Smuzhiyun 	case KVM_SEV_INIT:
963*4882a593Smuzhiyun 		r = sev_guest_init(kvm, &sev_cmd);
964*4882a593Smuzhiyun 		break;
965*4882a593Smuzhiyun 	case KVM_SEV_LAUNCH_START:
966*4882a593Smuzhiyun 		r = sev_launch_start(kvm, &sev_cmd);
967*4882a593Smuzhiyun 		break;
968*4882a593Smuzhiyun 	case KVM_SEV_LAUNCH_UPDATE_DATA:
969*4882a593Smuzhiyun 		r = sev_launch_update_data(kvm, &sev_cmd);
970*4882a593Smuzhiyun 		break;
971*4882a593Smuzhiyun 	case KVM_SEV_LAUNCH_MEASURE:
972*4882a593Smuzhiyun 		r = sev_launch_measure(kvm, &sev_cmd);
973*4882a593Smuzhiyun 		break;
974*4882a593Smuzhiyun 	case KVM_SEV_LAUNCH_FINISH:
975*4882a593Smuzhiyun 		r = sev_launch_finish(kvm, &sev_cmd);
976*4882a593Smuzhiyun 		break;
977*4882a593Smuzhiyun 	case KVM_SEV_GUEST_STATUS:
978*4882a593Smuzhiyun 		r = sev_guest_status(kvm, &sev_cmd);
979*4882a593Smuzhiyun 		break;
980*4882a593Smuzhiyun 	case KVM_SEV_DBG_DECRYPT:
981*4882a593Smuzhiyun 		r = sev_dbg_crypt(kvm, &sev_cmd, true);
982*4882a593Smuzhiyun 		break;
983*4882a593Smuzhiyun 	case KVM_SEV_DBG_ENCRYPT:
984*4882a593Smuzhiyun 		r = sev_dbg_crypt(kvm, &sev_cmd, false);
985*4882a593Smuzhiyun 		break;
986*4882a593Smuzhiyun 	case KVM_SEV_LAUNCH_SECRET:
987*4882a593Smuzhiyun 		r = sev_launch_secret(kvm, &sev_cmd);
988*4882a593Smuzhiyun 		break;
989*4882a593Smuzhiyun 	default:
990*4882a593Smuzhiyun 		r = -EINVAL;
991*4882a593Smuzhiyun 		goto out;
992*4882a593Smuzhiyun 	}
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
995*4882a593Smuzhiyun 		r = -EFAULT;
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun out:
998*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
999*4882a593Smuzhiyun 	return r;
1000*4882a593Smuzhiyun }
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun int svm_register_enc_region(struct kvm *kvm,
1003*4882a593Smuzhiyun 			    struct kvm_enc_region *range)
1004*4882a593Smuzhiyun {
1005*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1006*4882a593Smuzhiyun 	struct enc_region *region;
1007*4882a593Smuzhiyun 	int ret = 0;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	if (!sev_guest(kvm))
1010*4882a593Smuzhiyun 		return -ENOTTY;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1013*4882a593Smuzhiyun 		return -EINVAL;
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1016*4882a593Smuzhiyun 	if (!region)
1017*4882a593Smuzhiyun 		return -ENOMEM;
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
1020*4882a593Smuzhiyun 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1021*4882a593Smuzhiyun 	if (IS_ERR(region->pages)) {
1022*4882a593Smuzhiyun 		ret = PTR_ERR(region->pages);
1023*4882a593Smuzhiyun 		mutex_unlock(&kvm->lock);
1024*4882a593Smuzhiyun 		goto e_free;
1025*4882a593Smuzhiyun 	}
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	region->uaddr = range->addr;
1028*4882a593Smuzhiyun 	region->size = range->size;
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	list_add_tail(&region->list, &sev->regions_list);
1031*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 	/*
1034*4882a593Smuzhiyun 	 * The guest may change the memory encryption attribute from C=0 -> C=1
1035*4882a593Smuzhiyun 	 * or vice versa for this memory range. Make sure the caches are
1036*4882a593Smuzhiyun 	 * flushed so that guest data gets written into memory with the
1037*4882a593Smuzhiyun 	 * correct C-bit.
1038*4882a593Smuzhiyun 	 */
1039*4882a593Smuzhiyun 	sev_clflush_pages(region->pages, region->npages);
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	return ret;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun e_free:
1044*4882a593Smuzhiyun 	kfree(region);
1045*4882a593Smuzhiyun 	return ret;
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun static struct enc_region *
1049*4882a593Smuzhiyun find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1052*4882a593Smuzhiyun 	struct list_head *head = &sev->regions_list;
1053*4882a593Smuzhiyun 	struct enc_region *i;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	list_for_each_entry(i, head, list) {
1056*4882a593Smuzhiyun 		if (i->uaddr == range->addr &&
1057*4882a593Smuzhiyun 		    i->size == range->size)
1058*4882a593Smuzhiyun 			return i;
1059*4882a593Smuzhiyun 	}
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	return NULL;
1062*4882a593Smuzhiyun }
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun static void __unregister_enc_region_locked(struct kvm *kvm,
1065*4882a593Smuzhiyun 					   struct enc_region *region)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun 	sev_unpin_memory(kvm, region->pages, region->npages);
1068*4882a593Smuzhiyun 	list_del(&region->list);
1069*4882a593Smuzhiyun 	kfree(region);
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun int svm_unregister_enc_region(struct kvm *kvm,
1073*4882a593Smuzhiyun 			      struct kvm_enc_region *range)
1074*4882a593Smuzhiyun {
1075*4882a593Smuzhiyun 	struct enc_region *region;
1076*4882a593Smuzhiyun 	int ret;
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	if (!sev_guest(kvm)) {
1081*4882a593Smuzhiyun 		ret = -ENOTTY;
1082*4882a593Smuzhiyun 		goto failed;
1083*4882a593Smuzhiyun 	}
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	region = find_enc_region(kvm, range);
1086*4882a593Smuzhiyun 	if (!region) {
1087*4882a593Smuzhiyun 		ret = -EINVAL;
1088*4882a593Smuzhiyun 		goto failed;
1089*4882a593Smuzhiyun 	}
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	/*
1092*4882a593Smuzhiyun 	 * Ensure that all guest tagged cache entries are flushed before
1093*4882a593Smuzhiyun 	 * releasing the pages back to the system for use. CLFLUSH will
1094*4882a593Smuzhiyun 	 * not do this, so issue a WBINVD.
1095*4882a593Smuzhiyun 	 */
1096*4882a593Smuzhiyun 	wbinvd_on_all_cpus();
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	__unregister_enc_region_locked(kvm, region);
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
1101*4882a593Smuzhiyun 	return 0;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun failed:
1104*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
1105*4882a593Smuzhiyun 	return ret;
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
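/*
 * Tear down the SEV context when the VM is destroyed: flush guest-tagged
 * cache entries, unpin any memory regions userspace left registered,
 * deactivate and decommission the firmware handle, and release the ASID.
 */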
1108*4882a593Smuzhiyun void sev_vm_destroy(struct kvm *kvm)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1111*4882a593Smuzhiyun 	struct list_head *head = &sev->regions_list;
1112*4882a593Smuzhiyun 	struct list_head *pos, *q;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	if (!sev_guest(kvm))
1115*4882a593Smuzhiyun 		return;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	/*
1120*4882a593Smuzhiyun 	 * Ensure that all guest tagged cache entries are flushed before
1121*4882a593Smuzhiyun 	 * releasing the pages back to the system for use. CLFLUSH will
1122*4882a593Smuzhiyun 	 * not do this, so issue a WBINVD.
1123*4882a593Smuzhiyun 	 */
1124*4882a593Smuzhiyun 	wbinvd_on_all_cpus();
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	/*
1127*4882a593Smuzhiyun 	 * If userspace was terminated before unregistering the memory regions,
1128*4882a593Smuzhiyun 	 * unpin all the registered memory.
1129*4882a593Smuzhiyun 	 */
1130*4882a593Smuzhiyun 	if (!list_empty(head)) {
1131*4882a593Smuzhiyun 		list_for_each_safe(pos, q, head) {
1132*4882a593Smuzhiyun 			__unregister_enc_region_locked(kvm,
1133*4882a593Smuzhiyun 				list_entry(pos, struct enc_region, list));
1134*4882a593Smuzhiyun 			cond_resched();
1135*4882a593Smuzhiyun 		}
1136*4882a593Smuzhiyun 	}
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	sev_unbind_asid(kvm, sev->handle);
1141*4882a593Smuzhiyun 	sev_asid_free(sev->asid);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun int __init sev_hardware_setup(void)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	/* Maximum number of encrypted guests supported simultaneously */
1147*4882a593Smuzhiyun 	max_sev_asid = cpuid_ecx(0x8000001F);
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (!svm_sev_enabled())
1150*4882a593Smuzhiyun 		return 1;
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	/* Minimum ASID value that should be used for SEV guest */
1153*4882a593Smuzhiyun 	min_sev_asid = cpuid_edx(0x8000001F);
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	/* Initialize SEV ASID bitmaps */
1156*4882a593Smuzhiyun 	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1157*4882a593Smuzhiyun 	if (!sev_asid_bitmap)
1158*4882a593Smuzhiyun 		return 1;
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1161*4882a593Smuzhiyun 	if (!sev_reclaim_asid_bitmap)
1162*4882a593Smuzhiyun 		return 1;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	pr_info("SEV supported\n");
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	return 0;
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun void sev_hardware_teardown(void)
1170*4882a593Smuzhiyun {
1171*4882a593Smuzhiyun 	if (!svm_sev_enabled())
1172*4882a593Smuzhiyun 		return;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	bitmap_free(sev_asid_bitmap);
1175*4882a593Smuzhiyun 	bitmap_free(sev_reclaim_asid_bitmap);
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	sev_flush_asids();
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun void sev_guest_memory_reclaimed(struct kvm *kvm)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun 	if (!sev_guest(kvm))
1183*4882a593Smuzhiyun 		return;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	wbinvd_on_all_cpus();
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun void pre_sev_run(struct vcpu_svm *svm, int cpu)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1191*4882a593Smuzhiyun 	int asid = sev_get_asid(svm->vcpu.kvm);
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	/* Assign the ASID allocated to this SEV guest */
1194*4882a593Smuzhiyun 	svm->vmcb->control.asid = asid;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	/*
1197*4882a593Smuzhiyun 	 * Flush guest TLB:
1198*4882a593Smuzhiyun 	 *
1199*4882a593Smuzhiyun 	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
1200*4882a593Smuzhiyun 	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
1201*4882a593Smuzhiyun 	 */
1202*4882a593Smuzhiyun 	if (sd->sev_vmcbs[asid] == svm->vmcb &&
1203*4882a593Smuzhiyun 	    svm->vcpu.arch.last_vmentry_cpu == cpu)
1204*4882a593Smuzhiyun 		return;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	sd->sev_vmcbs[asid] = svm->vmcb;
1207*4882a593Smuzhiyun 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
1208*4882a593Smuzhiyun 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1209*4882a593Smuzhiyun }