/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
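
/*
 * Editor's illustrative sketch, not part of the original header: a caller
 * that just created one 1M entry in the kernel direct mapping could account
 * for it like this. The function name is hypothetical.
 */
static inline void example_account_direct_1m(void)
{
	update_page_count(PG_DIRECT_MAP_1M, 1);
}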

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
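
/*
 * Editor's illustrative sketch, not part of the original header: ZERO_PAGE()
 * selects one of several zero pages keyed on low virtual-address bits, so
 * that read-only zero mappings spread across cache colors. This spells out
 * the same computation the macro performs; the function name is hypothetical.
 */
static inline struct page *example_zero_page_for(unsigned long vaddr)
{
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}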

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
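
/*
 * Editor's illustrative sketch, not part of the original header: the three
 * state tests from the table above, written as plain expressions against the
 * bit definitions. The function name is hypothetical.
 */
static inline void example_pte_state_tests(pte_t pte)
{
	int none    = pte_val(pte) == _PAGE_INVALID;		/* pte == 0x400 */
	int swapped = (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		      == _PAGE_PROTECT;				/* (pte & 0x201) == 0x200 */
	int present = (pte_val(pte) & _PAGE_PRESENT) != 0;	/* (pte & 0x001) == 0x001 */

	(void)none; (void)swapped; (void)present;
}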

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
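
/*
 * Editor's illustrative sketch, not part of the original header: the _INDEX
 * masks above carve the table index of each translation level out of a
 * virtual address, e.g. the region-third (pud-level) index lives in bits
 * 31..41. The function name is hypothetical.
 */
static inline unsigned long example_region3_index(unsigned long addr)
{
	return (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
}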

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
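
/*
 * Editor's illustrative sketch, not part of the original header: reading the
 * software dirty and young bits of a (large) segment entry per the encoding
 * table above. The function name is hypothetical.
 */
static inline void example_segment_state(pmd_t pmd)
{
	int dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	int young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;

	(void)dirty; (void)young;
}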

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys, faults should no longer
 * be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
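
/*
 * Editor's illustrative sketch, not part of the original header: exchanging
 * a segment table entry with CRDTE (compare and replace DAT table entry);
 * the dtt argument selects the table level being replaced. Function name
 * and parameters are hypothetical.
 */
static inline void example_crdte_segment(unsigned long *table, unsigned long *entry,
					 unsigned long new, unsigned long vaddr,
					 unsigned long asce)
{
	/* table must be the origin of the segment table containing *entry */
	crdte(*entry, new, (unsigned long)table, CRDTE_DTT_SEGMENT, vaddr, asce);
}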

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
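
/*
 * Editor's illustrative sketch, not part of the original header: changing a
 * pte's protection the way the mprotect() path does, preserving the state
 * bits in _PAGE_CHG_MASK. The function name is hypothetical.
 */
static inline pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_RO);
}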
940*4882a593Smuzhiyun 
pte_wrprotect(pte_t pte)941*4882a593Smuzhiyun static inline pte_t pte_wrprotect(pte_t pte)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun 	pte_val(pte) &= ~_PAGE_WRITE;
944*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_PROTECT;
945*4882a593Smuzhiyun 	return pte;
946*4882a593Smuzhiyun }
947*4882a593Smuzhiyun 
pte_mkwrite(pte_t pte)948*4882a593Smuzhiyun static inline pte_t pte_mkwrite(pte_t pte)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_WRITE;
951*4882a593Smuzhiyun 	if (pte_val(pte) & _PAGE_DIRTY)
952*4882a593Smuzhiyun 		pte_val(pte) &= ~_PAGE_PROTECT;
953*4882a593Smuzhiyun 	return pte;
954*4882a593Smuzhiyun }
955*4882a593Smuzhiyun 
pte_mkclean(pte_t pte)956*4882a593Smuzhiyun static inline pte_t pte_mkclean(pte_t pte)
957*4882a593Smuzhiyun {
958*4882a593Smuzhiyun 	pte_val(pte) &= ~_PAGE_DIRTY;
959*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_PROTECT;
960*4882a593Smuzhiyun 	return pte;
961*4882a593Smuzhiyun }
962*4882a593Smuzhiyun 
pte_mkdirty(pte_t pte)963*4882a593Smuzhiyun static inline pte_t pte_mkdirty(pte_t pte)
964*4882a593Smuzhiyun {
965*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
966*4882a593Smuzhiyun 	if (pte_val(pte) & _PAGE_WRITE)
967*4882a593Smuzhiyun 		pte_val(pte) &= ~_PAGE_PROTECT;
968*4882a593Smuzhiyun 	return pte;
969*4882a593Smuzhiyun }
970*4882a593Smuzhiyun 
pte_mkold(pte_t pte)971*4882a593Smuzhiyun static inline pte_t pte_mkold(pte_t pte)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	pte_val(pte) &= ~_PAGE_YOUNG;
974*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_INVALID;
975*4882a593Smuzhiyun 	return pte;
976*4882a593Smuzhiyun }
977*4882a593Smuzhiyun 
pte_mkyoung(pte_t pte)978*4882a593Smuzhiyun static inline pte_t pte_mkyoung(pte_t pte)
979*4882a593Smuzhiyun {
980*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_YOUNG;
981*4882a593Smuzhiyun 	if (pte_val(pte) & _PAGE_READ)
982*4882a593Smuzhiyun 		pte_val(pte) &= ~_PAGE_INVALID;
983*4882a593Smuzhiyun 	return pte;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun 
pte_mkspecial(pte_t pte)986*4882a593Smuzhiyun static inline pte_t pte_mkspecial(pte_t pte)
987*4882a593Smuzhiyun {
988*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_SPECIAL;
989*4882a593Smuzhiyun 	return pte;
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun #ifdef CONFIG_HUGETLB_PAGE
pte_mkhuge(pte_t pte)993*4882a593Smuzhiyun static inline pte_t pte_mkhuge(pte_t pte)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun 	pte_val(pte) |= _PAGE_LARGE;
996*4882a593Smuzhiyun 	return pte;
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun #endif
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun #define IPTE_GLOBAL	0
1001*4882a593Smuzhiyun #define	IPTE_LOCAL	1
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun #define IPTE_NODAT	0x400
1004*4882a593Smuzhiyun #define IPTE_GUEST_ASCE	0x800
1005*4882a593Smuzhiyun 
__ptep_ipte(unsigned long address,pte_t * ptep,unsigned long opt,unsigned long asce,int local)1006*4882a593Smuzhiyun static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
1007*4882a593Smuzhiyun 					unsigned long opt, unsigned long asce,
1008*4882a593Smuzhiyun 					int local)
1009*4882a593Smuzhiyun {
1010*4882a593Smuzhiyun 	unsigned long pto = (unsigned long) ptep;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	if (__builtin_constant_p(opt) && opt == 0) {
1013*4882a593Smuzhiyun 		/* Invalidation + TLB flush for the pte */
1014*4882a593Smuzhiyun 		asm volatile(
1015*4882a593Smuzhiyun 			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
1016*4882a593Smuzhiyun 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
1017*4882a593Smuzhiyun 			  [m4] "i" (local));
1018*4882a593Smuzhiyun 		return;
1019*4882a593Smuzhiyun 	}
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	/* Invalidate ptes with options + TLB flush of the ptes */
1022*4882a593Smuzhiyun 	opt = opt | (asce & _ASCE_ORIGIN);
1023*4882a593Smuzhiyun 	asm volatile(
1024*4882a593Smuzhiyun 		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1025*4882a593Smuzhiyun 		: [r2] "+a" (address), [r3] "+a" (opt)
1026*4882a593Smuzhiyun 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun 
__ptep_ipte_range(unsigned long address,int nr,pte_t * ptep,int local)1029*4882a593Smuzhiyun static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1030*4882a593Smuzhiyun 					      pte_t *ptep, int local)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun 	unsigned long pto = (unsigned long) ptep;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	/* Invalidate a range of ptes + TLB flush of the ptes */
1035*4882a593Smuzhiyun 	do {
1036*4882a593Smuzhiyun 		asm volatile(
1037*4882a593Smuzhiyun 			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
1038*4882a593Smuzhiyun 			: [r2] "+a" (address), [r3] "+a" (nr)
1039*4882a593Smuzhiyun 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
1040*4882a593Smuzhiyun 	} while (nr != 255);
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun /*
1044*4882a593Smuzhiyun  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1045*4882a593Smuzhiyun  * both clear the TLB for the unmapped pte. The reason is that
1046*4882a593Smuzhiyun  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1047*4882a593Smuzhiyun  * to modify an active pte. The sequence is
1048*4882a593Smuzhiyun  *   1) ptep_get_and_clear
1049*4882a593Smuzhiyun  *   2) set_pte_at
1050*4882a593Smuzhiyun  *   3) flush_tlb_range
1051*4882a593Smuzhiyun  * On s390 the tlb needs to get flushed with the modification of the pte
1052*4882a593Smuzhiyun  * if the pte is active. The only way how this can be implemented is to
1053*4882a593Smuzhiyun  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1054*4882a593Smuzhiyun  * is a nop.
1055*4882a593Smuzhiyun  */
1056*4882a593Smuzhiyun pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1057*4882a593Smuzhiyun pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
ptep_test_and_clear_young(struct vm_area_struct * vma,unsigned long addr,pte_t * ptep)1060*4882a593Smuzhiyun static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1061*4882a593Smuzhiyun 					    unsigned long addr, pte_t *ptep)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun 	pte_t pte = *ptep;
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1066*4882a593Smuzhiyun 	return pte_young(pte);
1067*4882a593Smuzhiyun }
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1070*4882a593Smuzhiyun static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1071*4882a593Smuzhiyun 					 unsigned long address, pte_t *ptep)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun 	return ptep_test_and_clear_young(vma, address, ptep);
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1077*4882a593Smuzhiyun static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1078*4882a593Smuzhiyun 				       unsigned long addr, pte_t *ptep)
1079*4882a593Smuzhiyun {
1080*4882a593Smuzhiyun 	pte_t res;
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1083*4882a593Smuzhiyun 	if (mm_is_protected(mm) && pte_present(res))
1084*4882a593Smuzhiyun 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1085*4882a593Smuzhiyun 	return res;
1086*4882a593Smuzhiyun }
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1089*4882a593Smuzhiyun pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1090*4882a593Smuzhiyun void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1091*4882a593Smuzhiyun 			     pte_t *, pte_t, pte_t);
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1094*4882a593Smuzhiyun static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1095*4882a593Smuzhiyun 				     unsigned long addr, pte_t *ptep)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	pte_t res;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1100*4882a593Smuzhiyun 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
1101*4882a593Smuzhiyun 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1102*4882a593Smuzhiyun 	return res;
1103*4882a593Smuzhiyun }
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1107*4882a593Smuzhiyun  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1108*4882a593Smuzhiyun  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1109*4882a593Smuzhiyun  * cannot be accessed while the batched unmap is running. In this case
1110*4882a593Smuzhiyun  * full==1 and a simple pte_clear is enough. See tlb.h.
1111*4882a593Smuzhiyun  */
1112*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1113*4882a593Smuzhiyun static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1114*4882a593Smuzhiyun 					    unsigned long addr,
1115*4882a593Smuzhiyun 					    pte_t *ptep, int full)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun 	pte_t res;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	if (full) {
1120*4882a593Smuzhiyun 		res = *ptep;
1121*4882a593Smuzhiyun 		*ptep = __pte(_PAGE_INVALID);
1122*4882a593Smuzhiyun 	} else {
1123*4882a593Smuzhiyun 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1124*4882a593Smuzhiyun 	}
1125*4882a593Smuzhiyun 	if (mm_is_protected(mm) && pte_present(res))
1126*4882a593Smuzhiyun 		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
1127*4882a593Smuzhiyun 	return res;
1128*4882a593Smuzhiyun }
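/*
 * Sketch of the batched unmap path referred to above (illustrative,
 * arguments abbreviated from the generic mmu_gather API; exact
 * signatures are an assumption):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	// all TLBs of mm flushed
 *	...
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb.fullmm);
 *	...					// fullmm==1: plain pte clear
 *	tlb_finish_mmu(&tlb, start, end);
 */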
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1131*4882a593Smuzhiyun static inline void ptep_set_wrprotect(struct mm_struct *mm,
1132*4882a593Smuzhiyun 				      unsigned long addr, pte_t *ptep)
1133*4882a593Smuzhiyun {
1134*4882a593Smuzhiyun 	pte_t pte = *ptep;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	if (pte_write(pte))
1137*4882a593Smuzhiyun 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1141*4882a593Smuzhiyun static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1142*4882a593Smuzhiyun 					unsigned long addr, pte_t *ptep,
1143*4882a593Smuzhiyun 					pte_t entry, int dirty)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	if (pte_same(*ptep, entry))
1146*4882a593Smuzhiyun 		return 0;
1147*4882a593Smuzhiyun 	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1148*4882a593Smuzhiyun 	return 1;
1149*4882a593Smuzhiyun }
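/*
 * Illustrative caller (an assumption about the generic fault path):
 * the return value tells the caller whether the entry was updated:
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (ptep_set_access_flags(vma, addr, ptep, entry, dirty))
 *		update_mmu_cache(vma, addr, ptep);	// nop on s390
 */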
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun /*
1152*4882a593Smuzhiyun  * Additional functions to handle KVM guest page tables
1153*4882a593Smuzhiyun  */
1154*4882a593Smuzhiyun void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
1155*4882a593Smuzhiyun 		     pte_t *ptep, pte_t entry);
1156*4882a593Smuzhiyun void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1157*4882a593Smuzhiyun void ptep_notify(struct mm_struct *mm, unsigned long addr,
1158*4882a593Smuzhiyun 		 pte_t *ptep, unsigned long bits);
1159*4882a593Smuzhiyun int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
1160*4882a593Smuzhiyun 		    pte_t *ptep, int prot, unsigned long bit);
1161*4882a593Smuzhiyun void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
1162*4882a593Smuzhiyun 		     pte_t *ptep, int reset);
1163*4882a593Smuzhiyun void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1164*4882a593Smuzhiyun int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
1165*4882a593Smuzhiyun 		    pte_t *sptep, pte_t *tptep, pte_t pte);
1166*4882a593Smuzhiyun void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1169*4882a593Smuzhiyun 			    pte_t *ptep);
1170*4882a593Smuzhiyun int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1171*4882a593Smuzhiyun 			  unsigned char key, bool nq);
1172*4882a593Smuzhiyun int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1173*4882a593Smuzhiyun 			       unsigned char key, unsigned char *oldkey,
1174*4882a593Smuzhiyun 			       bool nq, bool mr, bool mc);
1175*4882a593Smuzhiyun int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
1176*4882a593Smuzhiyun int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1177*4882a593Smuzhiyun 			  unsigned char *key);
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
1180*4882a593Smuzhiyun 				unsigned long bits, unsigned long value);
1181*4882a593Smuzhiyun int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
1182*4882a593Smuzhiyun int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1183*4882a593Smuzhiyun 			unsigned long *oldpte, unsigned long *oldpgste);
1184*4882a593Smuzhiyun void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
1185*4882a593Smuzhiyun void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
1186*4882a593Smuzhiyun void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
1187*4882a593Smuzhiyun void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun #define pgprot_writecombine	pgprot_writecombine
1190*4882a593Smuzhiyun pgprot_t pgprot_writecombine(pgprot_t prot);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun #define pgprot_writethrough	pgprot_writethrough
1193*4882a593Smuzhiyun pgprot_t pgprot_writethrough(pgprot_t prot);
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun /*
1196*4882a593Smuzhiyun  * Certain architectures need to do special things when PTEs
1197*4882a593Smuzhiyun  * within a page table are directly modified.  Thus, the following
1198*4882a593Smuzhiyun  * hook is made available.
1199*4882a593Smuzhiyun  */
1200*4882a593Smuzhiyun static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1201*4882a593Smuzhiyun 			      pte_t *ptep, pte_t entry)
1202*4882a593Smuzhiyun {
1203*4882a593Smuzhiyun 	if (pte_present(entry))
1204*4882a593Smuzhiyun 		pte_val(entry) &= ~_PAGE_UNUSED;
1205*4882a593Smuzhiyun 	if (mm_has_pgste(mm))
1206*4882a593Smuzhiyun 		ptep_set_pte_at(mm, addr, ptep, entry);
1207*4882a593Smuzhiyun 	else
1208*4882a593Smuzhiyun 		*ptep = entry;
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun /*
1212*4882a593Smuzhiyun  * Conversion functions: convert a page and protection to a page entry,
1213*4882a593Smuzhiyun  * and a page entry and page directory to the page they refer to.
1214*4882a593Smuzhiyun  */
1215*4882a593Smuzhiyun static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	pte_t __pte;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	pte_val(__pte) = physpage | pgprot_val(pgprot);
1220*4882a593Smuzhiyun 	if (!MACHINE_HAS_NX)
1221*4882a593Smuzhiyun 		pte_val(__pte) &= ~_PAGE_NOEXEC;
1222*4882a593Smuzhiyun 	return pte_mkyoung(__pte);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1226*4882a593Smuzhiyun {
1227*4882a593Smuzhiyun 	unsigned long physpage = page_to_phys(page);
1228*4882a593Smuzhiyun 	pte_t __pte = mk_pte_phys(physpage, pgprot);
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	if (pte_write(__pte) && PageDirty(page))
1231*4882a593Smuzhiyun 		__pte = pte_mkdirty(__pte);
1232*4882a593Smuzhiyun 	return __pte;
1233*4882a593Smuzhiyun }
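/*
 * Worked example (illustrative only): installing a mapping with the
 * conversion helpers above; set_pte_at() takes the pgste path when the
 * mm uses KVM guest page tables:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);	// phys | prot, made young
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */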
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1236*4882a593Smuzhiyun #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1237*4882a593Smuzhiyun #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1238*4882a593Smuzhiyun #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
1241*4882a593Smuzhiyun #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun static inline unsigned long pmd_deref(pmd_t pmd)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	unsigned long origin_mask;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
1248*4882a593Smuzhiyun 	if (pmd_large(pmd))
1249*4882a593Smuzhiyun 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1250*4882a593Smuzhiyun 	return pmd_val(pmd) & origin_mask;
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun static inline unsigned long pmd_pfn(pmd_t pmd)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	return pmd_deref(pmd) >> PAGE_SHIFT;
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun static inline unsigned long pud_deref(pud_t pud)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun 	unsigned long origin_mask;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	origin_mask = _REGION_ENTRY_ORIGIN;
1263*4882a593Smuzhiyun 	if (pud_large(pud))
1264*4882a593Smuzhiyun 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1265*4882a593Smuzhiyun 	return pud_val(pud) & origin_mask;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun static inline unsigned long pud_pfn(pud_t pud)
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun 	return pud_deref(pud) >> PAGE_SHIFT;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun /*
1274*4882a593Smuzhiyun  * The pgd_offset function *always* adds the index for the top-level
1275*4882a593Smuzhiyun  * region/segment table. This is done to get a sequence like the
1276*4882a593Smuzhiyun  * following to work:
1277*4882a593Smuzhiyun  *	pgdp = pgd_offset(current->mm, addr);
1278*4882a593Smuzhiyun  *	pgd = READ_ONCE(*pgdp);
1279*4882a593Smuzhiyun  *	p4dp = p4d_offset(&pgd, addr);
1280*4882a593Smuzhiyun  *	...
1281*4882a593Smuzhiyun  * The subsequent p4d_offset, pud_offset and pmd_offset functions
1282*4882a593Smuzhiyun  * only add an index if they dereferenced the pointer; see the walk sketch after pmd_offset below.
1283*4882a593Smuzhiyun  */
1284*4882a593Smuzhiyun static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	unsigned long rste;
1287*4882a593Smuzhiyun 	unsigned int shift;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	/* Get the first entry of the top level table */
1290*4882a593Smuzhiyun 	rste = pgd_val(*pgd);
1291*4882a593Smuzhiyun 	/* Pick up the shift from the table type of the first entry */
1292*4882a593Smuzhiyun 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1293*4882a593Smuzhiyun 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1301*4882a593Smuzhiyun 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1302*4882a593Smuzhiyun 	return (p4d_t *) pgdp;
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun #define p4d_offset_lockless p4d_offset_lockless
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun 	return p4d_offset_lockless(pgdp, *pgdp, address);
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1312*4882a593Smuzhiyun {
1313*4882a593Smuzhiyun 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1314*4882a593Smuzhiyun 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
1315*4882a593Smuzhiyun 	return (pud_t *) p4dp;
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun #define pud_offset_lockless pud_offset_lockless
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	return pud_offset_lockless(p4dp, *p4dp, address);
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun #define pud_offset pud_offset
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1328*4882a593Smuzhiyun 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
1329*4882a593Smuzhiyun 	return (pmd_t *) pudp;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun #define pmd_offset_lockless pmd_offset_lockless
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun 	return pmd_offset_lockless(pudp, *pudp, address);
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun #define pmd_offset pmd_offset
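/*
 * Walk sketch following the pgd_offset comment above (illustrative,
 * error handling elided):
 *
 *	pgd_t *pgdp, pgd;
 *	p4d_t *p4dp, p4d;
 *	pud_t *pudp, pud;
 *	pmd_t *pmdp;
 *
 *	pgdp = pgd_offset(mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	p4d = READ_ONCE(*p4dp);
 *	pudp = pud_offset_lockless(p4dp, p4d, addr);
 *	pud = READ_ONCE(*pudp);
 *	pmdp = pmd_offset_lockless(pudp, pud, addr);
 *
 * Each _lockless step adds an index only if the entry it was handed
 * really points to a lower-level table; otherwise it returns the
 * passed-in pointer cast down, folding the missing level.
 */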
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1340*4882a593Smuzhiyun {
1341*4882a593Smuzhiyun 	return (unsigned long) pmd_deref(pmd);
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1345*4882a593Smuzhiyun {
1346*4882a593Smuzhiyun 	return end <= current->mm->context.asce_limit;
1347*4882a593Smuzhiyun }
1348*4882a593Smuzhiyun #define gup_fast_permitted gup_fast_permitted
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1351*4882a593Smuzhiyun #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1352*4882a593Smuzhiyun #define pte_page(x) pfn_to_page(pte_pfn(x))
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1355*4882a593Smuzhiyun #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1356*4882a593Smuzhiyun #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1357*4882a593Smuzhiyun #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun static inline pmd_t pmd_wrprotect(pmd_t pmd)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1362*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1363*4882a593Smuzhiyun 	return pmd;
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun static inline pmd_t pmd_mkwrite(pmd_t pmd)
1367*4882a593Smuzhiyun {
1368*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1369*4882a593Smuzhiyun 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1370*4882a593Smuzhiyun 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1371*4882a593Smuzhiyun 	return pmd;
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun static inline pmd_t pmd_mkclean(pmd_t pmd)
1375*4882a593Smuzhiyun {
1376*4882a593Smuzhiyun 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1377*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1378*4882a593Smuzhiyun 	return pmd;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun static inline pmd_t pmd_mkdirty(pmd_t pmd)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
1384*4882a593Smuzhiyun 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1385*4882a593Smuzhiyun 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1386*4882a593Smuzhiyun 	return pmd;
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun static inline pud_t pud_wrprotect(pud_t pud)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
1392*4882a593Smuzhiyun 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1393*4882a593Smuzhiyun 	return pud;
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun static inline pud_t pud_mkwrite(pud_t pud)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
1399*4882a593Smuzhiyun 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1400*4882a593Smuzhiyun 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1401*4882a593Smuzhiyun 	return pud;
1402*4882a593Smuzhiyun }
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun static inline pud_t pud_mkclean(pud_t pud)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
1407*4882a593Smuzhiyun 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
1408*4882a593Smuzhiyun 	return pud;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun static inline pud_t pud_mkdirty(pud_t pud)
1412*4882a593Smuzhiyun {
1413*4882a593Smuzhiyun 	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
1414*4882a593Smuzhiyun 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1415*4882a593Smuzhiyun 		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
1416*4882a593Smuzhiyun 	return pud;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1420*4882a593Smuzhiyun static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun 	/*
1423*4882a593Smuzhiyun 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1424*4882a593Smuzhiyun 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1425*4882a593Smuzhiyun 	 */
1426*4882a593Smuzhiyun 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1427*4882a593Smuzhiyun 		return pgprot_val(SEGMENT_NONE);
1428*4882a593Smuzhiyun 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1429*4882a593Smuzhiyun 		return pgprot_val(SEGMENT_RO);
1430*4882a593Smuzhiyun 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1431*4882a593Smuzhiyun 		return pgprot_val(SEGMENT_RX);
1432*4882a593Smuzhiyun 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1433*4882a593Smuzhiyun 		return pgprot_val(SEGMENT_RW);
1434*4882a593Smuzhiyun 	return pgprot_val(SEGMENT_RWX);
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun static inline pmd_t pmd_mkyoung(pmd_t pmd)
1438*4882a593Smuzhiyun {
1439*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1440*4882a593Smuzhiyun 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1441*4882a593Smuzhiyun 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1442*4882a593Smuzhiyun 	return pmd;
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun static inline pmd_t pmd_mkold(pmd_t pmd)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1448*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1449*4882a593Smuzhiyun 	return pmd;
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun 	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1455*4882a593Smuzhiyun 		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1456*4882a593Smuzhiyun 		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
1457*4882a593Smuzhiyun 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1458*4882a593Smuzhiyun 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1459*4882a593Smuzhiyun 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1460*4882a593Smuzhiyun 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1461*4882a593Smuzhiyun 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1462*4882a593Smuzhiyun 	return pmd;
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun 	pmd_t __pmd;
1468*4882a593Smuzhiyun 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1469*4882a593Smuzhiyun 	return __pmd;
1470*4882a593Smuzhiyun }
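/*
 * Illustrative example (an assumption): building a large segment entry
 * for a writable mapping; massage_pgprot_pmd() picks the segment-table
 * counterpart of the page protection, e.g. PAGE_RW -> SEGMENT_RW:
 *
 *	pmd_t pmd = mk_pmd_phys(phys & HPAGE_MASK, PAGE_RW);
 *	pmd = pmd_mkhuge(pmd);	// defined below for THP configs
 */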
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun static inline void __pmdp_csp(pmd_t *pmdp)
1475*4882a593Smuzhiyun {
1476*4882a593Smuzhiyun 	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1477*4882a593Smuzhiyun 	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun #define IDTE_GLOBAL	0
1481*4882a593Smuzhiyun #define IDTE_LOCAL	1
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun #define IDTE_PTOA	0x0800
1484*4882a593Smuzhiyun #define IDTE_NODAT	0x1000
1485*4882a593Smuzhiyun #define IDTE_GUEST_ASCE	0x2000
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1488*4882a593Smuzhiyun 					unsigned long opt, unsigned long asce,
1489*4882a593Smuzhiyun 					int local)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun 	unsigned long sto;
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
1494*4882a593Smuzhiyun 	if (__builtin_constant_p(opt) && opt == 0) {
1495*4882a593Smuzhiyun 		/* flush without guest asce */
1496*4882a593Smuzhiyun 		asm volatile(
1497*4882a593Smuzhiyun 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1498*4882a593Smuzhiyun 			: "+m" (*pmdp)
1499*4882a593Smuzhiyun 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1500*4882a593Smuzhiyun 			  [m4] "i" (local)
1501*4882a593Smuzhiyun 			: "cc" );
1502*4882a593Smuzhiyun 	} else {
1503*4882a593Smuzhiyun 		/* flush with guest asce */
1504*4882a593Smuzhiyun 		asm volatile(
1505*4882a593Smuzhiyun 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1506*4882a593Smuzhiyun 			: "+m" (*pmdp)
1507*4882a593Smuzhiyun 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1508*4882a593Smuzhiyun 			  [r3] "a" (asce), [m4] "i" (local)
1509*4882a593Smuzhiyun 			: "cc" );
1510*4882a593Smuzhiyun 	}
1511*4882a593Smuzhiyun }
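/*
 * Note (illustrative): the segment-table origin is recovered from the
 * entry address by subtracting the index, so for pmd_index(addr) == n,
 * sto points n entries before pmdp, i.e. at the start of the segment
 * table that contains pmdp.
 */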
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1514*4882a593Smuzhiyun 					unsigned long opt, unsigned long asce,
1515*4882a593Smuzhiyun 					int local)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun 	unsigned long r3o;
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
1520*4882a593Smuzhiyun 	r3o |= _ASCE_TYPE_REGION3;
1521*4882a593Smuzhiyun 	if (__builtin_constant_p(opt) && opt == 0) {
1522*4882a593Smuzhiyun 		/* flush without guest asce */
1523*4882a593Smuzhiyun 		asm volatile(
1524*4882a593Smuzhiyun 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
1525*4882a593Smuzhiyun 			: "+m" (*pudp)
1526*4882a593Smuzhiyun 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1527*4882a593Smuzhiyun 			  [m4] "i" (local)
1528*4882a593Smuzhiyun 			: "cc");
1529*4882a593Smuzhiyun 	} else {
1530*4882a593Smuzhiyun 		/* flush with guest asce */
1531*4882a593Smuzhiyun 		asm volatile(
1532*4882a593Smuzhiyun 			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
1533*4882a593Smuzhiyun 			: "+m" (*pudp)
1534*4882a593Smuzhiyun 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1535*4882a593Smuzhiyun 			  [r3] "a" (asce), [m4] "i" (local)
1536*4882a593Smuzhiyun 			: "cc" );
1537*4882a593Smuzhiyun 	}
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1541*4882a593Smuzhiyun pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1542*4882a593Smuzhiyun pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun #define __HAVE_ARCH_PGTABLE_DEPOSIT
1547*4882a593Smuzhiyun void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1548*4882a593Smuzhiyun 				pgtable_t pgtable);
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun #define __HAVE_ARCH_PGTABLE_WITHDRAW
1551*4882a593Smuzhiyun pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun #define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1554*4882a593Smuzhiyun static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1555*4882a593Smuzhiyun 					unsigned long addr, pmd_t *pmdp,
1556*4882a593Smuzhiyun 					pmd_t entry, int dirty)
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun 	VM_BUG_ON(addr & ~HPAGE_MASK);
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	entry = pmd_mkyoung(entry);
1561*4882a593Smuzhiyun 	if (dirty)
1562*4882a593Smuzhiyun 		entry = pmd_mkdirty(entry);
1563*4882a593Smuzhiyun 	if (pmd_val(*pmdp) == pmd_val(entry))
1564*4882a593Smuzhiyun 		return 0;
1565*4882a593Smuzhiyun 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1566*4882a593Smuzhiyun 	return 1;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1570*4882a593Smuzhiyun static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1571*4882a593Smuzhiyun 					    unsigned long addr, pmd_t *pmdp)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun 	pmd_t pmd = *pmdp;
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1576*4882a593Smuzhiyun 	return pmd_young(pmd);
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1580*4882a593Smuzhiyun static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1581*4882a593Smuzhiyun 					 unsigned long addr, pmd_t *pmdp)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun 	VM_BUG_ON(addr & ~HPAGE_MASK);
1584*4882a593Smuzhiyun 	return pmdp_test_and_clear_young(vma, addr, pmdp);
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1588*4882a593Smuzhiyun 			      pmd_t *pmdp, pmd_t entry)
1589*4882a593Smuzhiyun {
1590*4882a593Smuzhiyun 	if (!MACHINE_HAS_NX)
1591*4882a593Smuzhiyun 		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
1592*4882a593Smuzhiyun 	*pmdp = entry;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun static inline pmd_t pmd_mkhuge(pmd_t pmd)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1598*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1599*4882a593Smuzhiyun 	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1600*4882a593Smuzhiyun 	return pmd;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1604*4882a593Smuzhiyun static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1605*4882a593Smuzhiyun 					    unsigned long addr, pmd_t *pmdp)
1606*4882a593Smuzhiyun {
1607*4882a593Smuzhiyun 	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1611*4882a593Smuzhiyun static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
1612*4882a593Smuzhiyun 						 unsigned long addr,
1613*4882a593Smuzhiyun 						 pmd_t *pmdp, int full)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun 	if (full) {
1616*4882a593Smuzhiyun 		pmd_t pmd = *pmdp;
1617*4882a593Smuzhiyun 		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
1618*4882a593Smuzhiyun 		return pmd;
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun 	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1624*4882a593Smuzhiyun static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1625*4882a593Smuzhiyun 					  unsigned long addr, pmd_t *pmdp)
1626*4882a593Smuzhiyun {
1627*4882a593Smuzhiyun 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_INVALIDATE
1631*4882a593Smuzhiyun static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
1632*4882a593Smuzhiyun 				   unsigned long addr, pmd_t *pmdp)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
1637*4882a593Smuzhiyun }
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1640*4882a593Smuzhiyun static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1641*4882a593Smuzhiyun 				      unsigned long addr, pmd_t *pmdp)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	pmd_t pmd = *pmdp;
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	if (pmd_write(pmd))
1646*4882a593Smuzhiyun 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1650*4882a593Smuzhiyun 					unsigned long address,
1651*4882a593Smuzhiyun 					pmd_t *pmdp)
1652*4882a593Smuzhiyun {
1653*4882a593Smuzhiyun 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1654*4882a593Smuzhiyun }
1655*4882a593Smuzhiyun #define pmdp_collapse_flush pmdp_collapse_flush
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1658*4882a593Smuzhiyun #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun static inline int pmd_trans_huge(pmd_t pmd)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1663*4882a593Smuzhiyun }
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun #define has_transparent_hugepage has_transparent_hugepage
1666*4882a593Smuzhiyun static inline int has_transparent_hugepage(void)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	return MACHINE_HAS_EDAT1 ? 1 : 0;
1669*4882a593Smuzhiyun }
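/*
 * Illustrative fragment (an assumption about a generic walker): telling
 * a transparent huge mapping apart from a page-table pointer at the
 * segment level; pte_index() is the generic helper, assumed available:
 *
 *	pmd_t pmd = READ_ONCE(*pmdp);
 *	if (pmd_trans_huge(pmd))
 *		pfn = pmd_pfn(pmd);	// large segment mapping
 *	else
 *		ptep = (pte_t *) pmd_page_vaddr(pmd) + pte_index(addr);
 */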
1670*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun /*
1673*4882a593Smuzhiyun  * 64 bit swap entry format:
1674*4882a593Smuzhiyun  * A page-table entry has some bits we have to treat in a special way.
1675*4882a593Smuzhiyun  * Bits 52 and 55 have to be zero, otherwise a specification
1676*4882a593Smuzhiyun  * exception will occur instead of a page translation exception. The
1677*4882a593Smuzhiyun  * specification exception has the bad habit of not storing the
1678*4882a593Smuzhiyun  * necessary information in the lowcore.
1679*4882a593Smuzhiyun  * Bits 54 and 63 are used to indicate the page type.
1680*4882a593Smuzhiyun  * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1681*4882a593Smuzhiyun  * This leaves the bits 0-51 and bits 56-62 to store type and offset.
1682*4882a593Smuzhiyun  * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
1683*4882a593Smuzhiyun  * for the offset.
1684*4882a593Smuzhiyun  * |			  offset			|01100|type |00|
1685*4882a593Smuzhiyun  * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1686*4882a593Smuzhiyun  * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1687*4882a593Smuzhiyun  */
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun #define __SWP_OFFSET_MASK	((1UL << 52) - 1)
1690*4882a593Smuzhiyun #define __SWP_OFFSET_SHIFT	12
1691*4882a593Smuzhiyun #define __SWP_TYPE_MASK		((1UL << 5) - 1)
1692*4882a593Smuzhiyun #define __SWP_TYPE_SHIFT	2
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1695*4882a593Smuzhiyun {
1696*4882a593Smuzhiyun 	pte_t pte;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1699*4882a593Smuzhiyun 	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1700*4882a593Smuzhiyun 	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1701*4882a593Smuzhiyun 	return pte;
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun static inline unsigned long __swp_type(swp_entry_t entry)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun 	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun static inline unsigned long __swp_offset(swp_entry_t entry)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun 	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1715*4882a593Smuzhiyun {
1716*4882a593Smuzhiyun 	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
1720*4882a593Smuzhiyun #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
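/*
 * Worked example (illustrative only): a swap entry with type 3 and
 * offset 0x1234 round-trips through the helpers above:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	// e.val == _PAGE_INVALID | _PAGE_PROTECT |
 *	//	    (0x1234UL << __SWP_OFFSET_SHIFT) | (3UL << __SWP_TYPE_SHIFT)
 *	// __swp_type(e) == 3, __swp_offset(e) == 0x1234
 */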
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun #define kern_addr_valid(addr)   (1)
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun extern int vmem_add_mapping(unsigned long start, unsigned long size);
1725*4882a593Smuzhiyun extern void vmem_remove_mapping(unsigned long start, unsigned long size);
1726*4882a593Smuzhiyun extern int s390_enable_sie(void);
1727*4882a593Smuzhiyun extern int s390_enable_skey(void);
1728*4882a593Smuzhiyun extern void s390_reset_cmma(struct mm_struct *mm);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1731*4882a593Smuzhiyun #define HAVE_ARCH_UNMAPPED_AREA
1732*4882a593Smuzhiyun #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun #endif /* _ASM_S390_PGTABLE_H */
1735