xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright (C) 2011-2017 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10 
11 #include "mali_kernel_common.h"
12 #include "mali_osk.h"
13 #include "mali_ukk.h"
14 #include "mali_uk_types.h"
15 #include "mali_mmu_page_directory.h"
16 #include "mali_memory.h"
17 #include "mali_l2_cache.h"
18 
19 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
20 
mali_allocate_empty_page(mali_io_address * virt_addr)21 u32 mali_allocate_empty_page(mali_io_address *virt_addr)
22 {
23 	_mali_osk_errcode_t err;
24 	mali_io_address mapping;
25 	mali_dma_addr address;
26 
27 	if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
28 		/* Allocation failed */
29 		MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
30 		return 0;
31 	}
32 
33 	MALI_DEBUG_ASSERT_POINTER(mapping);
34 
35 	err = fill_page(mapping, 0);
36 	if (_MALI_OSK_ERR_OK != err) {
37 		mali_mmu_release_table_page(address, mapping);
38 		MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
39 		return 0;
40 	}
41 
42 	*virt_addr = mapping;
43 	return address;
44 }
45 
/* Release a page previously obtained from mali_allocate_empty_page().
 * A MALI_INVALID_PAGE address is silently ignored. */
void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
{
	if (MALI_INVALID_PAGE == address) {
		return;
	}

	mali_mmu_release_table_page(address, virt_addr);
}
52 
mali_create_fault_flush_pages(mali_dma_addr * page_directory,mali_io_address * page_directory_mapping,mali_dma_addr * page_table,mali_io_address * page_table_mapping,mali_dma_addr * data_page,mali_io_address * data_page_mapping)53 _mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
54 		mali_io_address *page_directory_mapping,
55 		mali_dma_addr *page_table, mali_io_address *page_table_mapping,
56 		mali_dma_addr *data_page, mali_io_address *data_page_mapping)
57 {
58 	_mali_osk_errcode_t err;
59 
60 	err = mali_mmu_get_table_page(data_page, data_page_mapping);
61 	if (_MALI_OSK_ERR_OK == err) {
62 		err = mali_mmu_get_table_page(page_table, page_table_mapping);
63 		if (_MALI_OSK_ERR_OK == err) {
64 			err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
65 			if (_MALI_OSK_ERR_OK == err) {
66 				fill_page(*data_page_mapping, 0);
67 				fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
68 				fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
69 				MALI_SUCCESS;
70 			}
71 			mali_mmu_release_table_page(*page_table, *page_table_mapping);
72 			*page_table = MALI_INVALID_PAGE;
73 		}
74 		mali_mmu_release_table_page(*data_page, *data_page_mapping);
75 		*data_page = MALI_INVALID_PAGE;
76 	}
77 	return err;
78 }
79 
/* Release one fault-flush page if allocated, and reset the caller's
 * bookkeeping (phys -> MALI_INVALID_PAGE, mapping -> NULL). */
static void mali_fault_flush_page_release(mali_dma_addr *phys, mali_io_address *mapping)
{
	if (MALI_INVALID_PAGE == *phys) {
		return;
	}

	mali_mmu_release_table_page(*phys, *mapping);
	*phys = MALI_INVALID_PAGE;
	*mapping = NULL;
}

/* Tear down the page set created by mali_create_fault_flush_pages().
 * Safe to call with any subset of the pages already invalid. */
void mali_destroy_fault_flush_pages(
	mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
	mali_dma_addr *page_table, mali_io_address *page_table_mapping,
	mali_dma_addr *data_page, mali_io_address *data_page_mapping)
{
	mali_fault_flush_page_release(page_directory, page_directory_mapping);
	mali_fault_flush_page_release(page_table, page_table_mapping);
	mali_fault_flush_page_release(data_page, data_page_mapping);
}
103 
fill_page(mali_io_address mapping,u32 data)104 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
105 {
106 	int i;
107 	MALI_DEBUG_ASSERT_POINTER(mapping);
108 
109 	for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
110 		_mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
111 	}
112 	_mali_osk_mem_barrier();
113 	MALI_SUCCESS;
114 }
115 
/**
 * Ensure page tables exist for the Mali virtual range
 * [mali_address, mali_address + size) and account the usage.
 *
 * For each 4 MiB page-directory entry (PDE) in the range, a page table is
 * allocated and marked present if it was not already, and the PDE's usage
 * count is incremented by the number of MMU pages the range occupies inside
 * that PDE.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_INVALID_ARGS for an
 *         empty/wrapped range, or the allocation error.
 *         NOTE(review): on a mid-loop allocation failure, PDEs and usage
 *         counts already updated for earlier entries are left in place —
 *         presumably the caller unmaps the range on failure; confirm.
 */
_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	_mali_osk_errcode_t err;
	mali_io_address pde_mapping;
	mali_dma_addr pde_phys;
	int i, page_count;
	u32 start_address;
	/* Reject empty range or 32-bit wrap-around of mali_address + size. */
	if (last_pde < first_pde)
		return _MALI_OSK_ERR_INVALID_ARGS;

	for (i = first_pde; i <= last_pde; i++) {
		if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
						 i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
			/* Page table not present */
			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);

			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
				return err;
			}
			pagedir->page_entries_mapped[i] = pde_mapping;

			/* Update PDE, mark as present */
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
							pde_phys | MALI_MMU_FLAGS_PRESENT);

			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
		}

		if (first_pde == last_pde) {
			/* Entire range lies inside a single PDE. */
			pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
		} else if (i == first_pde) {
			/* First PDE: count pages from mali_address to the PDE boundary. */
			start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
			page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
			pagedir->page_entries_usage_count[i] += page_count;
		} else if (i == last_pde) {
			/* Last PDE: count pages from the PDE boundary to range end. */
			start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
			page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
			pagedir->page_entries_usage_count[i] += page_count;
		} else {
			/* Middle PDE fully covered: 1024 pages per PDE
			 * (MALI_MMU_VIRTUAL_PAGE_SIZE / MALI_MMU_PAGE_SIZE).
			 * NOTE(review): assignment (not +=) — presumably a fully
			 * covered PDE cannot already be partially mapped; verify. */
			pagedir->page_entries_usage_count[i] = 1024;
		}
	}
	_mali_osk_write_mem_barrier();

	return _MALI_OSK_ERR_OK;
}
167 
mali_mmu_zero_pte(mali_io_address page_table,u32 mali_address,u32 size)168 MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
169 {
170 	int i;
171 	const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
172 	const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
173 
174 	for (i = first_pte; i <= last_pte; i++) {
175 		_mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
176 	}
177 }
178 
/* Read PDE 'index' and strip the flag bits, leaving the physical
 * address of the referenced page table. */
static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
{
	u32 pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
					 index * sizeof(u32));

	return pde & ~MALI_MMU_FLAGS_MASK;
}
184 
185 
/**
 * Unmap the Mali virtual range [mali_address, mali_address + size) from
 * @pagedir.
 *
 * For each PDE touched, the usage count is decremented; a page table whose
 * count drops to zero is released and its PDE cleared, otherwise only the
 * covered PTEs are zeroed. Finally the L2 caches are invalidated for the
 * physical pages that changed (or fully, as a fallback).
 *
 * @return _MALI_OSK_ERR_OK (via MALI_SUCCESS).
 */
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		/* Clamp this iteration's span to what remains of the range
		 * within the current PDE. */
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE  */

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			/* Page table still partially in use: record it for L2
			 * invalidation (at most two partial tables: first and
			 * last PDE of the range). */
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	/* Make all PTE/PDE writes visible before invalidating caches. */
	_mali_osk_write_mem_barrier();

	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}
265 
mali_mmu_pagedir_alloc(void)266 struct mali_page_directory *mali_mmu_pagedir_alloc(void)
267 {
268 	struct mali_page_directory *pagedir;
269 	_mali_osk_errcode_t err;
270 	mali_dma_addr phys;
271 
272 	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
273 	if (NULL == pagedir) {
274 		return NULL;
275 	}
276 
277 	err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
278 	if (_MALI_OSK_ERR_OK != err) {
279 		_mali_osk_free(pagedir);
280 		return NULL;
281 	}
282 
283 	pagedir->page_directory = (u32)phys;
284 
285 	/* Zero page directory */
286 	fill_page(pagedir->page_directory_mapped, 0);
287 
288 	return pagedir;
289 }
290 
mali_mmu_pagedir_free(struct mali_page_directory * pagedir)291 void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
292 {
293 	const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
294 	int i;
295 
296 	/* Free referenced page tables and zero PDEs. */
297 	for (i = 0; i < num_page_table_entries; i++) {
298 		if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
299 				pagedir->page_directory_mapped,
300 				sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
301 			mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
302 					     i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
303 			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
304 			mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
305 		}
306 	}
307 	_mali_osk_write_mem_barrier();
308 
309 	/* Free the page directory page. */
310 	mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
311 
312 	_mali_osk_free(pagedir);
313 }
314 
315 
/* Write PTEs mapping [mali_address, mali_address + size) to consecutive
 * physical pages starting at phys_address, OR-ing in permission_bits.
 * The page tables for the range must already exist (see
 * mali_mmu_pagedir_map); no barrier is issued here. */
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
			     mali_dma_addr phys_address, u32 size, u32 permission_bits)
{
	const u32 end_address = mali_address + size;
	u32 mali_phys = (u32)phys_address;

	/* Map physical pages into MMU page tables */
	while (mali_address < end_address) {
		mali_io_address page_table = pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)];

		MALI_DEBUG_ASSERT_POINTER(page_table);
		_mali_osk_mem_iowrite32_relaxed(page_table,
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						mali_phys | permission_bits);

		mali_address += MALI_MMU_PAGE_SIZE;
		mali_phys += MALI_MMU_PAGE_SIZE;
	}
}
330 
/* Debug-only: print the PDE/PTE state covering 'fault_addr'.
 * Compiles to a no-op when DEBUG is not defined. */
void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	const u32 pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
	const u32 pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
	u32 pde;

	pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				     pde_index * sizeof(u32));

	if (0 == (pde & MALI_MMU_FLAGS_PRESENT)) {
		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
				     fault_addr, pde));
	} else {
		const u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
		u32 pte;

		pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
					     pte_index * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, pte_addr, pte,
				     MALI_MMU_ENTRY_ADDRESS(pte),
				     pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
	}
#else
	MALI_IGNORE(pagedir);
	MALI_IGNORE(fault_addr);
#endif
}
365 
366 /* For instrumented */
/* State for the two-pass page table dump: a sizing pass (buffer == NULL,
 * only the *_size fields accumulate) and a fill pass (buffer set). */
struct dump_info {
	u32 buffer_left;          /* bytes of space remaining in buffer */
	u32 register_writes_size; /* bytes used/needed for register writes */
	u32 page_table_dump_size; /* bytes used/needed for page dumps */
	u32 *buffer;              /* output cursor; NULL during sizing pass */
};
373 
writereg(u32 where,u32 what,const char * comment,struct dump_info * info)374 static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
375 {
376 	if (NULL != info) {
377 		info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
378 
379 		if (NULL != info->buffer) {
380 			/* check that we have enough space */
381 			if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
382 
383 			*info->buffer = where;
384 			info->buffer++;
385 
386 			*info->buffer = what;
387 			info->buffer++;
388 
389 			info->buffer_left -= sizeof(u32) * 2;
390 		}
391 	}
392 
393 	MALI_SUCCESS;
394 }
395 
mali_mmu_dump_page(mali_io_address page,u32 phys_addr,struct dump_info * info)396 static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
397 {
398 	if (NULL != info) {
399 		/* 4096 for the page and 4 bytes for the address */
400 		const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
401 		const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
402 		const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
403 
404 		info->page_table_dump_size += dump_size_in_bytes;
405 
406 		if (NULL != info->buffer) {
407 			if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
408 
409 			*info->buffer = phys_addr;
410 			info->buffer++;
411 
412 			_mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
413 			info->buffer += page_size_in_elements;
414 
415 			info->buffer_left -= dump_size_in_bytes;
416 		}
417 	}
418 
419 	MALI_SUCCESS;
420 }
421 
dump_mmu_page_table(struct mali_page_directory * pagedir,struct dump_info * info)422 static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
423 {
424 	MALI_DEBUG_ASSERT_POINTER(pagedir);
425 	MALI_DEBUG_ASSERT_POINTER(info);
426 
427 	if (NULL != pagedir->page_directory_mapped) {
428 		int i;
429 
430 		MALI_CHECK_NO_ERROR(
431 			mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
432 		);
433 
434 		for (i = 0; i < 1024; i++) {
435 			if (NULL != pagedir->page_entries_mapped[i]) {
436 				MALI_CHECK_NO_ERROR(
437 					mali_mmu_dump_page(pagedir->page_entries_mapped[i],
438 							   _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
439 									   i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
440 				);
441 			}
442 		}
443 	}
444 
445 	MALI_SUCCESS;
446 }
447 
dump_mmu_registers(struct mali_page_directory * pagedir,struct dump_info * info)448 static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
449 {
450 	MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
451 				     "set the page directory address", info));
452 	MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
453 	MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
454 	MALI_SUCCESS;
455 }
456 
_mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s * args)457 _mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
458 {
459 	struct dump_info info = { 0, 0, 0, NULL };
460 	struct mali_session_data *session_data;
461 
462 	session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
463 	MALI_DEBUG_ASSERT_POINTER(session_data);
464 	MALI_DEBUG_ASSERT_POINTER(args);
465 
466 	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
467 	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
468 	args->size = info.register_writes_size + info.page_table_dump_size;
469 	MALI_SUCCESS;
470 }
471 
_mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s * args)472 _mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
473 {
474 	struct dump_info info = { 0, 0, 0, NULL };
475 	struct mali_session_data *session_data;
476 
477 	MALI_DEBUG_ASSERT_POINTER(args);
478 
479 	session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
480 	MALI_DEBUG_ASSERT_POINTER(session_data);
481 
482 	info.buffer_left = args->size;
483 	info.buffer = (u32 *)(uintptr_t)args->buffer;
484 
485 	args->register_writes = (uintptr_t)info.buffer;
486 	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
487 
488 	args->page_table_dump = (uintptr_t)info.buffer;
489 	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
490 
491 	args->register_writes_size = info.register_writes_size;
492 	args->page_table_dump_size = info.page_table_dump_size;
493 
494 	MALI_SUCCESS;
495 }
496