/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
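
/*
 * Illustrative example (not a definition): on 8xx with 4K base pages,
 * HPAGE_SHIFT = 19, so HPAGE_SIZE = 512K, HPAGE_MASK = ~0x7ffff and
 * HUGETLB_PAGE_ORDER = 19 - 12 = 7, i.e. one huge page covers 2^7 = 128
 * base pages.
 */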
#endif

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
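
/*
 * Worked example of the note above (values shown for PAGE_SHIFT = 12):
 * (1 << 12) - 1 = 0x00000fff, so PAGE_MASK is the signed int
 * ~0x00000fff = 0xfffff000 (-4096).  Assigning that to a 64-bit
 * unsigned long extends it to 0xfffffffffffff000, which is what callers
 * expect when masking 64-bit addresses.
 */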

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 *	KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two equivalent ways to relate a virtual address in the linear
 * mapping to its physical address:
 *	va = pa + PAGE_OFFSET - MEMORY_START
 *	va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
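
/*
 * Hypothetical numbers for the relations above (in the spirit of a kdump
 * kernel, where KERNELBASE != PAGE_OFFSET): PAGE_OFFSET = 0xc0000000,
 * MEMORY_START = 0, KERNELBASE = 0xc2000000, PHYSICAL_START = 0x02000000.
 * Then KERNELBASE - PAGE_OFFSET = 0x02000000 = PHYSICAL_START - MEMORY_START,
 * and both translation formulas agree: va = pa + 0xc0000000.
 */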

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif
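
/*
 * Example of the static-kernel case above: with the common ppc32 layout
 * KERNELBASE == PAGE_OFFSET == 0xc0000000 and PHYSICAL_START == 0, this
 * reduces to MEMORY_START == 0, consistent with the linear-mapping
 * equation KERNELBASE - PAGE_OFFSET == PHYSICAL_START - MEMORY_START.
 */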

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(vaddr)	({					\
	unsigned long _addr = (unsigned long)vaddr;			\
	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
	pfn_valid(virt_to_pfn(_addr));					\
})
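
/*
 * Sketch of how these helpers compose for a valid lowmem address kaddr
 * (assuming flatmem): virt_to_pfn(kaddr) == __pa(kaddr) >> PAGE_SHIFT,
 * and pfn_to_kaddr(virt_to_pfn(kaddr)) returns kaddr rounded down to a
 * page boundary, i.e. (kaddr & PAGE_MASK).  virt_addr_valid() is the
 * safe way to check an address before doing such conversions.
 */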

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *			MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *	PHYSICAL_START = kernstart_addr = Physical address of _stext
 *	KERNELBASE = Compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we need to also account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(PHYSICAL_START, 256M)
 *
 * To make the cost of __va() / __pa() more lightweight, we introduce
 * a new variable virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *			   ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
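
/*
 * Putting numbers on the example above: with PHYSICAL_START = 0x04000000
 * (64MB) and KERNELBASE = 0xc0000000, Effective KERNELBASE = 0xc0000000 +
 * (0x04000000 % 0x10000000) = 0xc4000000, so virt_phys_offset =
 * 0xc4000000 - 0x04000000 = 0xc0000000.  The definitions below then give
 * __va(0x04100000) = 0xc4100000 and __pa(0xc4100000) = 0x04100000, i.e.
 * addresses inside the running kernel translate consistently with the
 * 256M-aligned mapping described above.
 */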
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
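
/*
 * Why | and & behave like + and - here (a sketch, assuming the usual
 * Book3S 64 layout where PAGE_OFFSET is 0xc000000000000000 and
 * linear-mapped physical addresses never set the top nibble): when the
 * two operands share no set bits, x | PAGE_OFFSET == x + PAGE_OFFSET,
 * and masking with 0x0fffffffffffffffUL undoes it.  E.g.
 * __va(0x2000) == (void *)0xc000000000002000 and
 * __pa(0xc000000000002000) == 0x2000.
 */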

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif
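
/*
 * For instance, on Book3S 64 (PAGE_OFFSET = 0xc000000000000000) a linear
 * mapping address such as 0xc000000000010000 is a kernel address while a
 * typical userspace address like 0x0000000010000000 is not; on 32-bit
 * the split is simply at TASK_SIZE.
 */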

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK	0xfff
#else
#define HUGEPD_SHIFT_MASK	0x3f
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

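/*
 * kernstart_virt_addr holds the virtual address the kernel actually runs
 * at; it differs from KERNELBASE when the kernel has been relocated (e.g.
 * by KASLR), so the difference below is the relocation offset.
 */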
static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */