xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/psci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>

#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}
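
/*
 * Worked example (illustrative): with MPIDR_LEVEL_BITS == 8 and
 * MPIDR_HWID_BITMASK == 0xff00ffffff (the arm64 values), a request
 * with lowest_affinity_level == 1 gives
 *
 *	AFFINITY_MASK(1)      == ~((1UL << 8) - 1) == ~0xffUL
 *	psci_affinity_mask(1) == 0xff00ffffff & ~0xffUL == 0xff00ffff00
 *
 * i.e. Aff0 is ignored and only Aff3..Aff1 take part in the match.
 * Any affinity_level above 3 yields a zero mask, which the callers
 * below treat as PSCI_RET_INVALID_PARAMS.
 */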

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we treat VCPU suspend emulation the same
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means that for KVM the wakeup events are interrupts, which
	 * is consistent with the intended use of StateID as described in
	 * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI
	 * v0.2 specification (ARM DEN 0022A). This means all suspend
	 * states for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_off is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}
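
/*
 * Illustrative guest-side view of CPU_ON: with the SMCCC convention
 * used above, the arguments arrive as
 *
 *	x0/r0: PSCI_0_2_FN64_CPU_ON (or the 32-bit function ID)
 *	x1/r1: target MPIDR, e.g. 0x1 for the vcpu with Aff0 == 1
 *	x2/r2: entry point, latched into reset_state->pc
 *	x3/r3: context id, handed back to the target in r0/x0 via
 *	       reset_state->r0
 *
 * so smccc_get_arg1/2/3() above simply read x1..x3 (r1..r3 for a
 * 32-bit caller).
 */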

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are running,
	 * report ON; otherwise report OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
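
/*
 * Worked example (illustrative): AFFINITY_INFO(0x0100, 1) asks about
 * the Aff1 == 1 cluster. With the level-1 mask from
 * psci_affinity_mask(), every vcpu whose MPIDR matches 0x01xx is
 * inspected: if any of them is not powered off the call returns
 * AFFINITY_LEVEL_ON, if all matches are off it returns
 * AFFINITY_LEVEL_OFF, and if nothing matches at all it returns
 * INVALID_PARAMS.
 */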

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made.  Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not run
	 * after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
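
/*
 * Minimal userspace sketch (illustrative; vm_shutdown() and vm_reset()
 * are hypothetical VMM helpers) of how the exit prepared above is
 * consumed on the other side of KVM_RUN:
 *
 *	struct kvm_run *run;	// the mmap'ed vcpu run structure
 *
 *	if (!ioctl(vcpu_fd, KVM_RUN, 0) &&
 *	    run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
 *		switch (run->system_event.type) {
 *		case KVM_SYSTEM_EVENT_SHUTDOWN:
 *			vm_shutdown();	// hypothetical helper
 *			break;
 *		case KVM_SYSTEM_EVENT_RESET:
 *			vm_reset();	// hypothetical helper
 *			break;
 *		}
 *	}
 */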

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
{
	int i;

	/*
	 * Zero the input registers' upper 32 bits. They will be fully
	 * zeroed on exit, so we're fine changing them in place.
	 */
	for (i = 1; i < 4; i++)
		vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
}
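
/*
 * Illustrative: when a function is invoked through its SMC32 ID, the
 * SMCCC only defines the low 32 bits of the argument registers, so a
 * 64-bit caller may legitimately leave junk in the top halves (e.g.
 * x1 == 0xdeadbeef00000001). After the loop above the handlers only
 * ever see the meaningful low words (x1 == 0x1).
 */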

static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
	switch (fn) {
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_ON:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		/* Disallow these functions for 32bit guests */
		if (vcpu_mode_is_32bit(vcpu))
			return PSCI_RET_NOT_SUPPORTED;
		break;
	}

	return 0;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	val = kvm_psci_check_allowed_function(vcpu, psci_fn);
	if (val)
		goto out;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP hence does not require migration,
		 * or Trusted OS is not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally or deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
		 * should see an internal failure as the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with the
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

out:
	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
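
/*
 * Worked example (illustrative): PSCI versions are encoded as
 * (major << 16) | minor, so KVM_ARM_PSCI_0_2 reads back as 0x2 and
 * KVM_ARM_PSCI_1_0 (returned by the v1.0 handler below) as 0x10000.
 * A guest can split the value with the PSCI_VERSION_MAJOR() and
 * PSCI_VERSION_MINOR() helpers from include/uapi/linux/psci.h.
 */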

static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		val = kvm_psci_check_allowed_function(vcpu, feature);
		if (val)
			break;

		switch (feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
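
/*
 * Illustrative: a guest is expected to probe before it relies on a
 * capability, e.g. PSCI_FEATURES(PSCI_0_2_FN_SYSTEM_RESET) returns 0
 * (supported) from the list above, while an unknown function ID, or a
 * 64-bit ID queried by a 32-bit guest, yields PSCI_RET_NOT_SUPPORTED.
 */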

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_0_call(vcpu);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}
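
/*
 * Illustrative guest-side counterpart: a guest kernel would reach the
 * handler above with something like the following, where
 * arm_smccc_hvc() from <linux/arm-smccc.h> issues the trapping HVC
 * instruction:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_hvc(PSCI_0_2_FN_PSCI_VERSION,
 *		      0, 0, 0, 0, 0, 0, 0, &res);
 *	// res.a0 now holds the version written back by
 *	// smccc_set_retval(), e.g. KVM_ARM_PSCI_1_0.
 */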

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 4;		/* PSCI version and three workaround registers */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
		return -EFAULT;

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As with the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is present
			 * at all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}
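
/*
 * Illustrative, assuming the numeric uapi values (e.g. for
 * WORKAROUND_2: NOT_AVAIL == 0, UNKNOWN == 1, AVAIL == 2,
 * NOT_REQUIRED == 3): the "easy-to-compare" ordering is what lets
 * kvm_arm_set_fw_reg() below accept a migration source's level with a
 * single "get_kernel_wa_level(id) < level" test: restoring NOT_AVAIL
 * onto a NOT_REQUIRED host is fine, the reverse is rejected.
 */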

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu, vcpu->kvm);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
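
/*
 * Minimal userspace sketch (illustrative; vcpu_fd is a hypothetical,
 * already-open vcpu file descriptor) of the one-reg interface served
 * by kvm_arm_get_fw_reg() above and kvm_arm_set_fw_reg() below, e.g.
 * to carry the PSCI version across a migration:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	__u64 ver;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_PSCI_VERSION,
 *		.addr = (__u64)(unsigned long)&ver,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// save on the source
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// restore on the target
 */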

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}