/*
 * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_ukk.h"

#include "mali_mmu.h"
#include "mali_hw_core.h"
#include "mali_group.h"
#include "mali_mmu_page_directory.h"

/**
 * Size of the MMU registers in bytes
 */
#define MALI_MMU_REGISTERS_SIZE 0x24

/**
 * MMU commands
 * These are the commands that can be sent
 * to the MMU unit.
 */
typedef enum mali_mmu_command {
	MALI_MMU_COMMAND_ENABLE_PAGING = 0x00,   /**< Enable paging (memory translation) */
	MALI_MMU_COMMAND_DISABLE_PAGING = 0x01,  /**< Disable paging (memory translation) */
	MALI_MMU_COMMAND_ENABLE_STALL = 0x02,    /**< Enable stall on page fault */
	MALI_MMU_COMMAND_DISABLE_STALL = 0x03,   /**< Disable stall on page fault */
	MALI_MMU_COMMAND_ZAP_CACHE = 0x04,       /**< Zap the entire page table cache */
	MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
	MALI_MMU_COMMAND_HARD_RESET = 0x06       /**< Reset the MMU back to power-on settings */
} mali_mmu_command;

static void mali_mmu_probe_trigger(void *data);
static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);

MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);

/* Page fault queue flush helper pages.
 * Note that the mapping pointers are currently unused outside of the initialization functions. */
static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;

/* An empty page directory (no address valid) which is active on any MMU not currently marked as in use */
static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
static mali_io_address mali_empty_page_directory_virt = NULL;

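/**
 * Allocate the global helper pages shared by all MMU instances: the empty page
 * directory and the page fault flush pages.
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM if an allocation fails
 */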
_mali_osk_errcode_t mali_mmu_initialize(void)
{
	/* allocate the helper pages */
	mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
	if (0 == mali_empty_page_directory_phys) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		return _MALI_OSK_ERR_NOMEM;
	}

	if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
			&mali_page_fault_flush_page_directory_mapping,
			&mali_page_fault_flush_page_table,
			&mali_page_fault_flush_page_table_mapping,
			&mali_page_fault_flush_data_page,
			&mali_page_fault_flush_data_page_mapping)) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
		mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		mali_empty_page_directory_virt = NULL;
		return _MALI_OSK_ERR_NOMEM;
	}

	return _MALI_OSK_ERR_OK;
}

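/**
 * Release the global helper pages allocated by mali_mmu_initialize()
 */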
void mali_mmu_terminate(void)
{
	MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));

	/* Free global helper pages */
	mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
	mali_empty_page_directory_phys = MALI_INVALID_PAGE;
	mali_empty_page_directory_virt = NULL;

	/* Free the page fault flush pages */
	mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
				       &mali_page_fault_flush_page_directory_mapping,
				       &mali_page_fault_flush_page_table,
				       &mali_page_fault_flush_page_table_mapping,
				       &mali_page_fault_flush_data_page,
				       &mali_page_fault_flush_data_page_mapping);
}

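/**
 * Create an MMU core for the given resource and attach it to the group.
 * Physical MMUs are also reset and get their IRQ handlers installed;
 * both steps are skipped for virtual MMUs.
 * @return Pointer to the new MMU core, or NULL on failure
 */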
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
{
	struct mali_mmu_core *mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
	if (NULL != mmu) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
			if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
				if (is_virtual) {
					/* Skip reset and IRQ setup for virtual MMU */
					return mmu;
				}

				if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					mmu->irq = _mali_osk_irq_init(resource->irq,
								      mali_group_upper_half_mmu,
								      group,
								      mali_mmu_probe_trigger,
								      mali_mmu_probe_ack,
								      mmu,
								      resource->description);
					if (NULL != mmu->irq) {
						return mmu;
					} else {
						MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
					}
				}
				mali_group_remove_mmu_core(group);
			} else {
				MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
			}
			mali_hw_core_delete(&mmu->hw_core);
		}

		_mali_osk_free(mmu);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}

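/**
 * Tear down an MMU core: release its IRQ handler (if any), the HW core and the object itself
 */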
void mali_mmu_delete(struct mali_mmu_core *mmu)
{
	if (NULL != mmu->irq) {
		_mali_osk_irq_term(mmu->irq);
	}

	mali_hw_core_delete(&mmu->hw_core);
	_mali_osk_free(mmu);
}

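/**
 * Issue the enable paging command to the MMU and wait for the hardware
 * to report that paging is enabled
 */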
static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
{
	int i;

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
	}
}

/**
 * Issues the enable stall command to the MMU and waits for HW to complete the request
 * @param mmu The MMU to enable the stall on
 * @return MALI_TRUE if the HW stall was successfully engaged, otherwise MALI_FALSE (request timed out)
 */
static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
{
	int i;
	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);

	if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when paging is not enabled.\n"));
		return MALI_TRUE;
	}

	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in page fault state.\n"));
		return MALI_FALSE;
	}

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
			break;
		}
		if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
			break;
		}
		if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return MALI_FALSE;
	}

	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a page fault.\n"));
		return MALI_FALSE;
	}

	return MALI_TRUE;
}

/**
 * Issues the disable stall command to the MMU and waits for HW to complete the request
 * @param mmu The MMU to disable the stall on
 */
static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
{
	int i;
	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);

	if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
		MALI_DEBUG_PRINT(3, ("MMU stall disable skipped since paging is not enabled.\n"));
		return;
	}
	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in page fault state.\n"));
		return;
	}

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
			break;
		}
		if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
			break;
		}
		if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
	}
}

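/**
 * Tell the MMU that the current page fault has been handled, taking it out of
 * page fault mode
 */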
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
{
	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
}

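/**
 * Issue a hard reset to the MMU and wait for the DTE address register to read
 * back as zero, which indicates that the reset has completed
 */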
MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
{
	int i;

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
	MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return _MALI_OSK_ERR_FAULT;
	}

	return _MALI_OSK_ERR_OK;
}

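/**
 * Reset the MMU: stall it if possible, perform a hard reset, re-enable the page
 * fault and read bus error interrupts, activate the empty page directory and
 * turn paging back on
 */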
_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	stall_success = mali_mmu_enable_stall(mmu);
	if (!stall_success) {
		err = _MALI_OSK_ERR_BUSY;
	}

	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));

	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* no session is active, so just activate the empty page directory */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
		mali_mmu_enable_paging(mmu);
		err = _MALI_OSK_ERR_OK;
	}
	mali_mmu_disable_stall(mmu);

	return err;
}

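/**
 * Zap the entire page table cache of the MMU, stalling it around the operation.
 * @return MALI_FALSE if the MMU could not be stalled (it is in page fault state),
 *         otherwise MALI_TRUE
 */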
mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
{
	mali_bool stall_success = mali_mmu_enable_stall(mmu);

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

	if (MALI_FALSE == stall_success) {
		/* False means that it is in page fault state; it is not possible to disable the stall then */
		return MALI_FALSE;
	}

	mali_mmu_disable_stall(mmu);
	return MALI_TRUE;
}

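/**
 * Zap the entire page table cache without stalling the MMU first
 */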
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}

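/**
 * Zap the single page table cache line that covers the given Mali virtual address
 */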
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}

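/**
 * Point the MMU at a new page directory and zap its page table cache.
 * The caller must ensure the MMU is in stalled or page fault mode, otherwise
 * the DTE address write will not take effect.
 */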
static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
{
	/* The MMU must be in stalled or page fault mode for this write to take effect */
	MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
				& (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}

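/**
 * Activate the page directory of a session on the MMU, stalling it around the switch
 */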
void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);
	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
	mali_mmu_disable_stall(mmu);
}

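/**
 * Activate the empty page directory on the MMU; used when no session owns the core
 */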
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);

	/* This function can only be called when the core is idle, so the stall cannot fail. */
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);

	mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
	mali_mmu_disable_stall(mmu);
}

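/**
 * Activate the page fault flush page directory on the MMU. This is used while a
 * page fault is being handled, so the stall request is allowed to fail.
 */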
void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
	stall_success = mali_mmu_enable_stall(mmu);
	/* This function is expected to fail the stalling, since the MMU might be in page fault mode when it is called */
	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
	if (MALI_TRUE == stall_success)
		mali_mmu_disable_stall(mmu);
}

/* Called during IRQ probing to make the MMU raise an interrupt */
static void mali_mmu_probe_trigger(void *data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
}

/* Called during IRQ probing when the MMU should acknowledge an interrupt from the hardware */
static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
{
	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
	u32 int_stat;

	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);

	MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
	if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
		MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
	} else {
		MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
	}

	if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
		MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
	} else {
		MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
	}

	if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
	    (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
		return _MALI_OSK_ERR_OK;
	}

	return _MALI_OSK_ERR_FAULT;
}

#if 0
void mali_mmu_print_state(struct mali_mmu_core *mmu)
{
	MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
}
#endif