/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FP_H
#define __ASM_FP_H

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00
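
/*
 * Illustrative sketch (not a helper defined in this header): a compat
 * FPSCR value can be split into its AArch64 FPSR/FPCR views with
 *
 *	fpsr = fpscr & VFP_FPSCR_STAT_MASK;
 *	fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
 */
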
/*
 * The VFP state has 32x64-bit registers and a single 32-bit
 * control/status register.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
#endif

struct task_struct;

extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);

extern void fpsimd_bind_task_to_cpu(void);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
				     void *sve_state, unsigned int sve_vl);

extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);

/* Maximum VL that SVE VL-agnostic software can transparently support */
#define SVE_VL_ARCH_MAX 0x100

/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

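/*
 * Address of the FFR slot within a task's SVE register dump.  This is
 * the base pointer that sve_save_state() and sve_load_state() are
 * given when saving/restoring the full SVE state.
 */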
static inline void *sve_pffr(struct thread_struct *thread)
{
	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
}

extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
extern void sve_flush_live(void);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
				       unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);

struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);

extern u64 read_zcr_features(void);

extern int __ro_after_init sve_max_vl;
extern int __ro_after_init sve_max_virtualisable_vl;
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
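
/*
 * Illustrative sketch (assumed usage, not an API defined here): find
 * the largest supported VQ not exceeding max_vq.  Because the bitmap
 * index decreases as VQ increases, the first set bit at or after
 * __vq_to_bit(max_vq) corresponds to the maximum supported VQ <= max_vq:
 *
 *	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, __vq_to_bit(max_vq));
 *	if (bit < SVE_VQ_MAX)
 *		vq = __bit_to_vq(bit);
 */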

/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool sve_vq_available(unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), sve_vq_map);
}

#ifdef CONFIG_ARM64_SVE

extern size_t sve_state_size(struct task_struct const *task);

extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

extern int sve_set_vector_length(struct task_struct *task,
				 unsigned long vl, unsigned long flags);

extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);

static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}
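
/*
 * Note (assumed caller contract): a write to CPACR_EL1 only takes
 * effect after a context synchronization event.  Callers that need the
 * EL0 SVE trap state to change before the next exception return should
 * issue an isb() themselves.
 */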

#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
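
/*
 * Usage sketch (illustrative): ZCR_ELx.LEN holds VQ - 1, so to program
 * a vector length of vq * 16 bytes while avoiding a redundant sysreg
 * write, one might do
 *
 *	sve_cond_update_zcr_vq(vq - 1, SYS_ZCR_EL1);
 */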

/*
 * Probing and setup functions.
 * Calls to these functions must be serialised with one another.
 */
extern void __init sve_init_vq_map(void);
extern void sve_update_vq_map(void);
extern int sve_verify_vq_map(void);
extern void __init sve_setup(void);

#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
static inline void sve_setup(void) { }

#endif /* ! CONFIG_ARM64_SVE */

/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);

#endif /* ! __ASSEMBLY__ */

#endif /* __ASM_FP_H */