/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
#ifdef __KERNEL__

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 *
 * There are several potential gotchas here.  The 40x hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
 *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits.  Because 40x doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits.  We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
#define	_PAGE_SPECIAL	0x020	/* software: Special page */
#define	_PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_RW	0x100	/* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */

/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE		0

/* cache related flags non existing on 40x */
#define _PAGE_COHERENT	0

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	_PAGE_EXEC
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#define _PMD_BAD	0x802
#define _PMD_SIZE_4M	0x0c0
#define _PMD_SIZE_16M	0x0e0
#define _PMD_USER	0

#define _PTE_NONE_MASK	0

/* Until my rework is finished, 40x still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES	1

#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#endif /* __KERNEL__ */
#endif /*  _ASM_POWERPC_NOHASH_32_PTE_40x_H */