/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #ifndef __ASM_ARM_PROCESSOR_H
9*4882a593Smuzhiyun #define __ASM_ARM_PROCESSOR_H
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #ifdef __KERNEL__
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <asm/hw_breakpoint.h>
14*4882a593Smuzhiyun #include <asm/ptrace.h>
15*4882a593Smuzhiyun #include <asm/types.h>
16*4882a593Smuzhiyun #include <asm/unified.h>
17*4882a593Smuzhiyun #include <asm/vdso/processor.h>
18*4882a593Smuzhiyun
#ifdef __KERNEL__
/*
 * Tasks running with a 26-bit address-space personality get the legacy
 * 26-bit task size as their stack top; everyone else gets the full
 * 32-bit TASK_SIZE.
 */
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif
24*4882a593Smuzhiyun
/*
 * Per-thread hardware-breakpoint state: one perf_event slot per
 * hardware breakpoint/watchpoint register exposed by the CPU.
 */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun struct thread_struct {
32*4882a593Smuzhiyun /* fault info */
33*4882a593Smuzhiyun unsigned long address;
34*4882a593Smuzhiyun unsigned long trap_no;
35*4882a593Smuzhiyun unsigned long error_code;
36*4882a593Smuzhiyun /* debugging */
37*4882a593Smuzhiyun struct debug_info debug;
38*4882a593Smuzhiyun };
39*4882a593Smuzhiyun
/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Report an empty (zero-length) whitelist region. */
	*offset = *size = 0;
}
49*4882a593Smuzhiyun
/* Initial thread_struct contents for the boot task: everything zeroed. */
#define INIT_THREAD  { }
51*4882a593Smuzhiyun
/*
 * Set up the user register state for a freshly exec'd thread.
 *
 * For FDPIC binaries, r7-r9 carry the exec/interp load maps and the
 * dynamic section address, and r10 the data segment base; they must
 * survive the register wipe, so they are saved before the memset and
 * restored afterwards when the personality requests FDPIC function
 * pointers.  The CPSR is chosen from the task personality (26- vs
 * 32-bit user mode), with the Thumb bit set when the entry point is a
 * Thumb address (low bit set).
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})
80*4882a593Smuzhiyun
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Address a sleeping task is blocked at, for /proc/<pid>/wchan. */
unsigned long get_wchan(struct task_struct *p);

/*
 * The saved user-mode pt_regs sit at the very top of the task's kernel
 * stack; back up one pt_regs-sized slot from THREAD_START_SP.
 */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
94*4882a593Smuzhiyun
#ifdef CONFIG_SMP
/*
 * Emit the SMP variant of an instruction inline and record its address
 * in the .alt.smp.init section together with the UP replacement, so the
 * boot code can patch in the UP form when running on a uniprocessor.
 */
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#else
/* UP build: just emit the UP instruction, no patch record needed. */
#define __ALT_SMP_ASM(smp, up)	up
#endif
105*4882a593Smuzhiyun
/*
 * Prefetching support - only ARMv5 and later.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
/* Hint the CPU to pull the cache line containing *ptr into the D-cache. */
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
/*
 * Prefetch with intent to write: PLDW (ARMv7 MP extension) on SMP,
 * patched down to a plain PLD when running uniprocessor (see
 * __ALT_SMP_ASM).
 */
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun #endif
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun #endif /* __ASM_ARM_PROCESSOR_H */
137