/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/* This is probably not the most graceful way to handle this. */

#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif

/*
 * These should be defined in hugetlb.h, but apparently not.
 * "Huge" for us should be 4MB or 16MB, which are both represented
 * in L1 PTE's. Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
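/*
 * Worked example (illustrative, assuming CONFIG_PAGE_SIZE_4KB, i.e.
 * PAGE_SHIFT == 12): HPAGE_SHIFT of 22 gives HPAGE_SIZE = 4MB and
 * HUGETLB_PAGE_ORDER = 22 - 12 = 10, so one huge page covers 2^10
 * base pages.
 */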

#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
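/*
 * Example (illustrative): with PAGE_SHIFT == 12, PAGE_SIZE is 0x1000
 * (4096 bytes) and PAGE_MASK is 0xfffff000, i.e. the mask that clears
 * the in-page offset bits of an address.
 */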

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pgtable-nopmd.h and asm-generic/pgtable-nopud.h
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

/*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
 * Also, check if we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
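/*
 * Illustration (not in the original source): both macros are simple
 * linear offsets, so __pa(PAGE_OFFSET) == PHYS_OFFSET and, for any
 * kernel direct-map address kva, __va(__pa(kva)) == kva.
 */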

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable. */
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Need to not use a define for linesize; may move this to another file. */
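/*
 * Note on the asm below: loop0/endloop0 set up a Hexagon hardware loop
 * of PAGE_SIZE/32 iterations; each iteration issues dczeroa to
 * zero-allocate one 32-byte L1 cache line and advances the pointer by
 * 32 bytes, zeroing the whole page without first reading it from memory.
 */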
static inline void clear_page(void *page)
{
	/* This can only be done on pages with L1 WB cache */
	asm volatile(
		"	loop0(1f,%1);\n"
		"1:	{ dczeroa(%0);\n"
		"	  %0 = add(%0,#32); }:endloop0\n"
		: "+r" (page)
		: "r" (PAGE_SIZE/32)
		: "lc0", "sa0", "memory"
	);
}

#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)

/*
 * Under assumption that kernel always "sees" user map...
 */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page - pointer to page entry in mem_map
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)

#define page_to_virt(page) __va(page_to_phys(page))
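/*
 * Example (illustrative): the conversions above compose, e.g.
 * pfn_to_virt(virt_to_pfn(kaddr)) yields the page-aligned kernel
 * address containing kaddr, and page_to_virt(virt_to_page(kaddr))
 * reaches the same address via the mem_map entry.
 */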

/*
 * For port to Hexagon Virtual Machine, MAYBE we check for attempts
 * to reference reserved HVM space, but in any case, the VM will be
 * protected.
 */
#define kern_addr_valid(addr) (1)

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif