xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/archrandom.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_ARCHRANDOM_H
3*4882a593Smuzhiyun #define _ASM_ARCHRANDOM_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #ifdef CONFIG_ARCH_RANDOM
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/bug.h>
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <asm/cpufeature.h>
10*4882a593Smuzhiyun 
/*
 * Read one 64-bit random value from the RNDR system register into *v.
 *
 * Returns true on success, false if the register reported failure.
 * Callers must have established that the CPU implements RNDR (see the
 * ARM64_HAS_RNG / __early_cpu_has_rndr() checks in the callers below);
 * this helper performs the raw access only.
 */
static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 *
	 * The CSET therefore observes Z == 0 ("ne") on success and
	 * writes 1 into 'ok'; on failure Z == 1 and 'ok' becomes 0.
	 * "cc" tells the compiler the flags are clobbered.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}
28*4882a593Smuzhiyun 
/*
 * Always reports "no entropy available": only the *_seed_* variants
 * below are backed by RNDR on arm64. Presumably the direct interfaces
 * are left unwired so RNDR output is reserved for seeding the pool
 * rather than consumed on every arch_get_random_long() call —
 * NOTE(review): rationale inferred from structure, confirm upstream.
 */
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	return false;
}
33*4882a593Smuzhiyun 
/*
 * Always reports "no entropy available" — see arch_get_random_long():
 * on this architecture only the *_seed_* interfaces use RNDR.
 */
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	return false;
}
38*4882a593Smuzhiyun 
arch_get_random_seed_long(unsigned long * v)39*4882a593Smuzhiyun static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	/*
42*4882a593Smuzhiyun 	 * Only support the generic interface after we have detected
43*4882a593Smuzhiyun 	 * the system wide capability, avoiding complexity with the
44*4882a593Smuzhiyun 	 * cpufeature code and with potential scheduling between CPUs
45*4882a593Smuzhiyun 	 * with and without the feature.
46*4882a593Smuzhiyun 	 */
47*4882a593Smuzhiyun 	if (!cpus_have_const_cap(ARM64_HAS_RNG))
48*4882a593Smuzhiyun 		return false;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	return __arm64_rndr(v);
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 
arch_get_random_seed_int(unsigned int * v)54*4882a593Smuzhiyun static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	unsigned long val;
57*4882a593Smuzhiyun 	bool ok = arch_get_random_seed_long(&val);
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	*v = val;
60*4882a593Smuzhiyun 	return ok;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
__early_cpu_has_rndr(void)63*4882a593Smuzhiyun static inline bool __init __early_cpu_has_rndr(void)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	/* Open code as we run prior to the first call to cpufeature. */
66*4882a593Smuzhiyun 	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
67*4882a593Smuzhiyun 	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun static inline bool __init __must_check
arch_get_random_seed_long_early(unsigned long * v)71*4882a593Smuzhiyun arch_get_random_seed_long_early(unsigned long *v)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun 	WARN_ON(system_state != SYSTEM_BOOTING);
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	if (!__early_cpu_has_rndr())
76*4882a593Smuzhiyun 		return false;
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	return __arm64_rndr(v);
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun #define arch_get_random_seed_long_early arch_get_random_seed_long_early
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun #endif /* CONFIG_ARCH_RANDOM */
83*4882a593Smuzhiyun #endif /* _ASM_ARCHRANDOM_H */
84