/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
#define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
#define AMR_KUAP_SHIFT		62
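/*
 * Note (added for clarity): the two block bits are the top bits of the
 * AMR, i.e. the key 0 read/write disable bits, so AMR_KUAP_BLOCKED ==
 * 0xc000000000000000 and AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT == 0x3.
 * The assembly below relies on this to rebuild the mask with a cheap
 * li/sldi pair instead of loading a full 64-bit constant.
 */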

#ifdef __ASSEMBLY__

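/*
 * Note (added for clarity): assembly counterpart of the C
 * kuap_restore_amr() below; it only rewrites the AMR when the value
 * saved in pt_regs differs from the current one, avoiding a needless
 * mtspr/isync.
 */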
.macro kuap_restore_amr	gpr1, gpr2
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	ld	\gpr2, STACK_REGS_KUAP(r1)
	cmpd	\gpr1, \gpr2
	beq	998f
	isync
	mtspr	SPRN_AMR, \gpr2
	/* No isync required, see kuap_restore_amr() */
998:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
#endif

.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	.endif
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_KUAP(r1)
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f
	// We don't isync here because we very recently entered via rfid
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_KUAP

#include <asm/mmu.h>
#include <asm/ptrace.h>

static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
		isync();
		mtspr(SPRN_AMR, regs->kuap);
		/*
		 * No isync required here because we are about to RFI back to
		 * previous context before any user accesses would be made,
		 * which is a CSI.
		 */
	}
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
		unsigned long amr = mfspr(SPRN_AMR);
		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
		return amr;
	}
	return 0;
}

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */
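/*
 * Illustrative sketch of the intended (non-nested) pattern; the
 * allow_read_from_user()/prevent_read_from_user() wrappers shown here
 * are the generic helpers that funnel into allow_user_access() and
 * prevent_user_access() below:
 *
 *	allow_read_from_user(uptr, size);
 *	... access user memory ...
 *	prevent_read_from_user(uptr, size);
 *
 * An inner allow/prevent pair inside that window would leave the AMR
 * fully blocked after its prevent, which is why nesting is unsupported.
 */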

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

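/*
 * Note (added for clarity): regs->kuap holds the AMR value saved at
 * interrupt entry by kuap_save_amr_and_lock above, i.e. the access
 * permissions that were in force when the fault was taken.
 */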
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0UL;
}

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }
#endif /* !CONFIG_PPC_KUAP */

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));
	if (dir == KUAP_READ)
		set_kuap(AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(0);
	else
		BUILD_BUG();
}
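/*
 * Illustrative note: the BUILD_BUG_ON() above means 'dir' must be a
 * compile-time constant, e.g.
 *
 *	allow_user_access(to, from, size, KUAP_READ_WRITE);
 *
 * Also note the inversion: allowing reads means blocking only writes
 * (and vice versa) because the AMR bits are *disable* bits.
 */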

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
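/*
 * Hypothetical caller, sketching the save/restore pairing:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... region that must not touch userspace ...
 *	restore_user_access(flags);
 *
 * Restoring to AMR_KUAP_BLOCKED is equivalent to closing a user access
 * window, so the uaccess flush is repeated in that case.
 */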
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */