xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/nvhe/psci-relay.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2020 - Google LLC
4*4882a593Smuzhiyun  * Author: David Brazdil <dbrazdil@google.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <asm/kvm_asm.h>
8*4882a593Smuzhiyun #include <asm/kvm_hyp.h>
9*4882a593Smuzhiyun #include <asm/kvm_mmu.h>
10*4882a593Smuzhiyun #include <linux/arm-smccc.h>
11*4882a593Smuzhiyun #include <linux/kvm_host.h>
12*4882a593Smuzhiyun #include <uapi/linux/psci.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <nvhe/memory.h>
15*4882a593Smuzhiyun #include <nvhe/trap_handler.h>
16*4882a593Smuzhiyun 
/* Boot/resume entry trampolines; defined outside this file (hyp entry code). */
void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

/* Re-enter the host with the given context; does not return here. */
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;

/* Sentinel returned by find_cpu_id() when no logical CPU matches an MPIDR. */
#define INVALID_CPU_ID	UINT_MAX
/*
 * Per-CPU handoff slot holding the pc/r0 the host asked a CPU to come up
 * with. The lock flag serializes concurrent writers to the same slot.
 */
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

/*
 * cpu_on_args is written by the CPU issuing CPU_ON (a remote CPU), so it
 * needs the lock; suspend_args is only ever written by the owning CPU.
 */
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
43*4882a593Smuzhiyun 
/*
 * True iff the host advertised a PSCI 0.1 implementation of @what and
 * @func_id equals the firmware-assigned function ID the host passed down.
 */
#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)
47*4882a593Smuzhiyun 
/* Does @func_id match any of the host-provided PSCI 0.1 function IDs? */
static bool is_psci_0_1_call(u64 func_id)
{
	if (is_psci_0_1(cpu_suspend, func_id))
		return true;
	if (is_psci_0_1(cpu_on, func_id))
		return true;
	if (is_psci_0_1(cpu_off, func_id))
		return true;
	return is_psci_0_1(migrate, func_id);
}
55*4882a593Smuzhiyun 
/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
static bool is_psci_0_2_call(u64 func_id)
{
	if (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31))
		return true;

	return PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31);
}
62*4882a593Smuzhiyun 
/*
 * Issue SMC @fn with up to three arguments and return the first result
 * register (a0). Thin trampoline used by every relay path below.
 */
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}
71*4882a593Smuzhiyun 
/* Forward the host's call (x0-x3 from its saved context) to EL3 unmodified. */
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
77*4882a593Smuzhiyun 
/*
 * Translate @mpidr into the logical CPU ID recorded in the hyp copy of
 * the logical map, or INVALID_CPU_ID if it is malformed or unknown.
 */
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int cpu;

	/* Reject MPIDRs with bits set outside the hardware ID mask. */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_logical_map(cpu) == mpidr)
			break;
	}

	return (cpu < NR_CPUS) ? cpu : INVALID_CPU_ID;
}
93*4882a593Smuzhiyun 
/*
 * Attempt to take ownership of @args. Returns true iff the flag was
 * UNLOCKED and this caller flipped it to LOCKED. Acquire semantics order
 * the winner's subsequent accesses to pc/r0 after the cmpxchg.
 */
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}
101*4882a593Smuzhiyun 
/*
 * Hand @args back; release semantics publish the owner's prior writes
 * before the slot becomes reusable.
 */
static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}
106*4882a593Smuzhiyun 
/*
 * Relay a host CPU_ON call, diverting the target CPU's boot path through
 * hyp. The host-requested pc/r0 are stashed in the target's cpu_on_args
 * slot and firmware is told to start the target at kvm_hyp_cpu_entry with
 * that CPU's kvm_init_params; the stashed values are consumed later in
 * kvm_host_psci_cpu_entry().
 *
 * Returns a PSCI status code (PSCI_RET_SUCCESS or an error value).
 */
static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
	/* Make pc/r0 visible before firmware can start the target CPU. */
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}
150*4882a593Smuzhiyun 
/*
 * Relay a host CPU_SUSPEND call, stashing the host's resume pc/r0 in this
 * CPU's suspend_args slot and pointing firmware's wake-up entry at
 * kvm_hyp_cpu_resume so hyp regains control first on a deep-sleep wake-up.
 *
 * Returns the firmware's PSCI status code.
 */
static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return if shallow sleep state, or wake up into the entry
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
}
178*4882a593Smuzhiyun 
/*
 * Relay a host SYSTEM_SUSPEND call. Like psci_cpu_suspend(), but the
 * host passes pc/r0 in x1/x2 (there is no power_state argument) and the
 * call only returns to the caller on failure.
 */
static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}
202*4882a593Smuzhiyun 
/*
 * C entry point for a CPU coming back up through hyp after CPU_ON
 * (@is_cpu_on) or a suspend wake-up. Loads the pc/r0 the host stashed in
 * the matching per-CPU boot-args slot, programs ELR so __host_enter()
 * resumes the host at that pc with r0 in x0, and for CPU_ON releases the
 * lock taken by the booting CPU in psci_cpu_on(). Never returns.
 */
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	/*
	 * Only the CPU_ON slot is locked (by the CPU that issued the call);
	 * the suspend slot is never locked, see psci_cpu_suspend().
	 */
	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}
223*4882a593Smuzhiyun 
/*
 * Dispatch a PSCI 0.1 call using the function IDs the host passed down.
 * CPU_OFF and MIGRATE go straight to firmware; CPU_ON/CPU_SUSPEND must be
 * intercepted so the boot path runs through hyp first.
 */
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}
235*4882a593Smuzhiyun 
/*
 * Dispatch a PSCI 0.2 call. CPU_SUSPEND and CPU_ON are intercepted so the
 * wake-up/boot path runs through hyp; everything else is forwarded.
 */
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
	/*
	 * SYSTEM_OFF/RESET should not return according to the spec.
	 * Allow it so as to stay robust to broken firmware.
	 */
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		return psci_forward(host_ctxt);
	}

	return PSCI_RET_NOT_SUPPORTED;
}
261*4882a593Smuzhiyun 
/*
 * Dispatch a PSCI 1.x call. SYSTEM_SUSPEND is intercepted; the few 1.x
 * additions are forwarded; everything else falls back to the 0.2 handler.
 */
static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (func_id == PSCI_1_0_FN64_SYSTEM_SUSPEND)
		return psci_system_suspend(func_id, host_ctxt);

	if (func_id == PSCI_1_0_FN_PSCI_FEATURES ||
	    func_id == PSCI_1_0_FN_SET_SUSPEND_MODE ||
	    func_id == PSCI_1_1_FN64_SYSTEM_RESET2)
		return psci_forward(host_ctxt);

	return psci_0_2_handler(func_id, host_ctxt);
}
275*4882a593Smuzhiyun 
/*
 * Top-level PSCI relay for host calls. Dispatches on the PSCI version the
 * host reported in kvm_host_psci_config. Returns false when x0 does not
 * hold a PSCI function ID for that version (the call is not ours);
 * otherwise handles the call, writes the result to the host's x0, zeroes
 * x1-x3, and returns true.
 */
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		/* 1.0 and later use the same function ID space as 0.2. */
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}
305