/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)
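
/*
 * Illustrative sketch only (an assumption, not code from this header):
 * with 16-bit ASIDs held in TTBR1_EL1 bits 63:48, a TTBR value is
 * roughly composed as
 *
 *	ttbr = phys_to_ttbr(pgd_phys) | FIELD_PREP(TTBR_ASID_MASK, asid);
 *
 * and the user variant of the same ASID is obtained by OR-ing in
 * USER_ASID_FLAG. phys_to_ttbr() and FIELD_PREP() are used here purely
 * to illustrate the bit layout, not to quote the real switching code.
 */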

#ifndef __ASSEMBLY__

#include <linux/refcount.h>

typedef struct {
	atomic64_t	id;
#ifdef CONFIG_COMPAT
	void		*sigpage;
#endif
	refcount_t	pinned;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                                                  <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic_set(mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
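
/*
 * A minimal usage sketch (an assumption, not a quotation of the real
 * TLBI code): a flush routine samples the ASID exactly once and orders
 * the PTE update before the invalidation. flush_one_user_page() is a
 * hypothetical name; __tlbi()/__TLBI_VADDR() are the helpers provided
 * by <asm/tlbflush.h>.
 *
 *	static inline void flush_one_user_page(struct mm_struct *mm,
 *					       unsigned long uaddr)
 *	{
 *		unsigned long asid = ASID(mm);	// one snapshot of context.id
 *
 *		dsb(ishst);			// PTE write visible before TLBI
 *		__tlbi(vale1is, __TLBI_VADDR(uaddr, asid));
 *		dsb(ish);			// wait for completion
 *	}
 *
 * Even if a rollover republishes mm->context.id between the snapshot
 * and the TLBI, the stale 'asid' only names a generation that is
 * already clean in the TLB, which is exactly the property argued above.
 */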

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
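
/*
 * Usage sketch, modelled on the __tlbi_user() pattern in
 * <asm/tlbflush.h>: with kpti enabled, EL0 runs under a second ASID
 * with USER_ASID_FLAG set, so invalidation paths repeat the operation
 * for the user ASID:
 *
 *	if (arm64_kernel_unmapped_at_el0())
 *		__tlbi(vale1is, addr | USER_ASID_FLAG);
 */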

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,
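
/*
 * Usage note: the core kernel expands this macro at the end of the
 * init_mm initializer (see mm/init-mm.c), so that the initial mm is
 * backed by init_pg_dir:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */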

#endif	/* !__ASSEMBLY__ */
#endif