xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  *
3  * (C) COPYRIGHT 2010-2014, 2016, 2017 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15 
16 
17 
18 
19 
20 #include "mali_kbase_mmu_mode.h"
21 
22 #include "mali_kbase.h"
23 #include "mali_midg_regmap.h"
24 
25 #define ENTRY_TYPE_MASK     3ULL
26 /* For valid ATEs bit 1 = (level == 3) ? 1 : 0.
27  * The MMU is only ever configured by the driver so that ATEs
28  * are at level 3, so bit 1 should always be set
29  */
30 #define ENTRY_IS_ATE        3ULL
31 #define ENTRY_IS_INVAL      2ULL
32 #define ENTRY_IS_PTE        3ULL
33 
34 #define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
#define ENTRY_ACCESS_RW (1ULL << 6)     /* bits 7:6 */
36 #define ENTRY_ACCESS_RO (3ULL << 6)
37 #define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
38 #define ENTRY_ACCESS_BIT (1ULL << 10)
39 #define ENTRY_NX_BIT (1ULL << 54)
40 
/* Helper Function to perform assignment of page table entries, to
 * ensure the use of strd, which is required on LPAE systems.
 *
 * @pte: destination page table entry (must be naturally aligned)
 * @phy: 64-bit value to store into the entry
 *
 * The store must be single-copy atomic: the MMU may walk the tables
 * concurrently, so it must never observe a half-written entry.
 */
static inline void page_table_entry_set(u64 *pte, u64 phy)
{
#ifdef CONFIG_64BIT
	/* On 64-bit CPUs an aligned 64-bit store is already atomic */
	*pte = phy;
#elif defined(CONFIG_ARM)
	/*
	 * In order to prevent the compiler keeping cached copies of
	 * memory, we have to explicitly say that we have updated memory.
	 *
	 * Note: We could manually move the data ourselves into R0 and
	 * R1 by specifying register variables that are explicitly
	 * given registers assignments, the down side of this is that
	 * we have to assume cpu endianness.  To avoid this we can use
	 * the ldrd to read the data from memory into R0 and R1 which
	 * will respect the cpu endianness, we then use strd to make
	 * the 64 bit assignment to the page table entry.
	 */
	asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
			"strd r0, r1, [%[pte]]\n\t"
			: "=m" (*pte)
			: [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
			: "r0", "r1");
#else
#error "64-bit atomic write must be implemented for your architecture"
#endif
}
70 
mmu_get_as_setup(struct kbase_context * kctx,struct kbase_mmu_setup * const setup)71 static void mmu_get_as_setup(struct kbase_context *kctx,
72 		struct kbase_mmu_setup * const setup)
73 {
74 	/* Set up the required caching policies at the correct indices
75 	 * in the memattr register.
76 	 */
77 	setup->memattr =
78 		(AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
79 			(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
80 		(AS_MEMATTR_FORCE_TO_CACHE_ALL    <<
81 			(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
82 		(AS_MEMATTR_WRITE_ALLOC           <<
83 			(AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
84 		(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF   <<
85 			(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
86 		(AS_MEMATTR_AARCH64_OUTER_WA         <<
87 			(AS_MEMATTR_INDEX_OUTER_WA * 8));
88 
89 	setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK;
90 	setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
91 }
92 
mmu_update(struct kbase_context * kctx)93 static void mmu_update(struct kbase_context *kctx)
94 {
95 	struct kbase_device * const kbdev = kctx->kbdev;
96 	struct kbase_as * const as = &kbdev->as[kctx->as_nr];
97 	struct kbase_mmu_setup * const current_setup = &as->current_setup;
98 
99 	mmu_get_as_setup(kctx, current_setup);
100 
101 	/* Apply the address space setting */
102 	kbase_mmu_hw_configure(kbdev, as, kctx);
103 }
104 
mmu_disable_as(struct kbase_device * kbdev,int as_nr)105 static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
106 {
107 	struct kbase_as * const as = &kbdev->as[as_nr];
108 	struct kbase_mmu_setup * const current_setup = &as->current_setup;
109 
110 	current_setup->transtab = 0ULL;
111 	current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
112 
113 	/* Apply the address space setting */
114 	kbase_mmu_hw_configure(kbdev, as, NULL);
115 }
116 
pte_to_phy_addr(u64 entry)117 static phys_addr_t pte_to_phy_addr(u64 entry)
118 {
119 	if (!(entry & 1))
120 		return 0;
121 
122 	return entry & ~0xFFF;
123 }
124 
/* Return non-zero when the entry's type field (bits 1:0) marks a
 * valid address translation entry.
 */
static int ate_is_valid(u64 ate)
{
	return (ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE;
}
129 
/* Return non-zero when the entry's type field (bits 1:0) marks a
 * valid page table (next-level) entry.
 */
static int pte_is_valid(u64 pte)
{
	return (pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE;
}
134 
135 /*
136  * Map KBASE_REG flags to MMU flags
137  */
get_mmu_flags(unsigned long flags)138 static u64 get_mmu_flags(unsigned long flags)
139 {
140 	u64 mmu_flags;
141 
142 	/* store mem_attr index as 4:2 (macro called ensures 3 bits already) */
143 	mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
144 
145 	/* Set access flags - note that AArch64 stage 1 does not support
146 	 * write-only access, so we use read/write instead
147 	 */
148 	if (flags & KBASE_REG_GPU_WR)
149 		mmu_flags |= ENTRY_ACCESS_RW;
150 	else if (flags & KBASE_REG_GPU_RD)
151 		mmu_flags |= ENTRY_ACCESS_RO;
152 
153 	/* nx if requested */
154 	mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
155 
156 	if (flags & KBASE_REG_SHARE_BOTH) {
157 		/* inner and outer shareable */
158 		mmu_flags |= SHARE_BOTH_BITS;
159 	} else if (flags & KBASE_REG_SHARE_IN) {
160 		/* inner shareable coherency */
161 		mmu_flags |= SHARE_INNER_BITS;
162 	}
163 
164 	return mmu_flags;
165 }
166 
/* Atomically write a level-3 address translation entry mapping @phy
 * with the access/attribute bits derived from @flags.
 */
static void entry_set_ate(u64 *entry, phys_addr_t phy, unsigned long flags)
{
	u64 ate = (phy & ~0xFFF) | get_mmu_flags(flags) |
			ENTRY_ACCESS_BIT | ENTRY_IS_ATE;

	page_table_entry_set(entry, ate);
}
173 
/* Atomically write a page table entry pointing at the next-level
 * table located at physical address @phy.
 */
static void entry_set_pte(u64 *entry, phys_addr_t phy)
{
	u64 pte = (phy & ~0xFFF) | ENTRY_ACCESS_BIT | ENTRY_IS_PTE;

	page_table_entry_set(entry, pte);
}
179 
/* Atomically overwrite @entry with the invalid-entry encoding */
static void entry_invalidate(u64 *entry)
{
	const u64 inval = ENTRY_IS_INVAL;

	page_table_entry_set(entry, inval);
}
184 
/* AArch64 page-table operations exposed to the generic kbase MMU code */
static struct kbase_mmu_mode const aarch64_mode = {
	.update = mmu_update,
	.get_as_setup = mmu_get_as_setup,
	.disable_as = mmu_disable_as,
	.pte_to_phy_addr = pte_to_phy_addr,
	.ate_is_valid = ate_is_valid,
	.pte_is_valid = pte_is_valid,
	.entry_set_ate = entry_set_ate,
	.entry_set_pte = entry_set_pte,
	.entry_invalidate = entry_invalidate
};
196 
/* Return the (static, immutable) AArch64 MMU mode operations table */
struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
{
	return &aarch64_mode;
}
201