/*
 * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "mali_kernel_common.h"
#include "mali_memory.h"
#include "mali_memory_secure.h"
#include "mali_osk.h"
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>      /* vmf_insert_pfn() */
#include <linux/version.h> /* LINUX_VERSION_CODE / KERNEL_VERSION */
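
/*
 * Import a secure dma-buf from a user space file descriptor: take a
 * reference on the buffer, attach the Mali device as an importer and
 * map the buffer into a scatter-gather table for the GPU mapping below.
 */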
_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd)
{
	struct dma_buf *buf;
	MALI_DEBUG_ASSERT_POINTER(secure_mem);

	/* Take a reference on the dma buffer behind the fd. */
	buf = dma_buf_get(mem_fd);
	if (IS_ERR_OR_NULL(buf)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	if (size != buf->size) {
		MALI_DEBUG_PRINT_ERROR(("The secure mem size does not match the dma buf size!\n"));
		goto failed_alloc_mem;
	}

	secure_mem->buf = buf;
	secure_mem->attachment = dma_buf_attach(secure_mem->buf, &mali_platform_device->dev);
	/* dma_buf_attach() returns an ERR_PTR on failure, never NULL. */
	if (IS_ERR_OR_NULL(secure_mem->attachment)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf attachment!\n"));
		goto failed_dma_attach;
	}

	secure_mem->sgt = dma_buf_map_attachment(secure_mem->attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(secure_mem->sgt)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to map dma buf attachment\n"));
		goto failed_dma_map;
	}

	secure_mem->count = size / MALI_MMU_PAGE_SIZE;

	return _MALI_OSK_ERR_OK;

failed_dma_map:
	dma_buf_detach(secure_mem->buf, secure_mem->attachment);
failed_dma_attach:
failed_alloc_mem:
	dma_buf_put(buf);
	return _MALI_OSK_ERR_FAULT;
}
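
/*
 * Map the imported buffer into the session's Mali MMU address space,
 * one scatterlist segment at a time, starting at GPU virtual address
 * vaddr and using the page properties in props.
 */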
_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props)
{
	struct mali_page_directory *pagedir;
	struct scatterlist *sg;
	u32 virt = vaddr;
	u32 prop = props;
	int i;

	MALI_DEBUG_ASSERT_POINTER(secure_mem);
	MALI_DEBUG_ASSERT_POINTER(secure_mem->sgt);
	MALI_DEBUG_ASSERT_POINTER(session);

	pagedir = session->page_directory;

	for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
		u32 size = sg_dma_len(sg);
		dma_addr_t phys = sg_dma_address(sg);

		/* Each sg segment must be a whole number of MMU pages. */
		MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
		/* The Mali MMU uses 32-bit addresses, so the segment must sit below 4 GB. */
		MALI_DEBUG_ASSERT(0 == (phys & ~(dma_addr_t)0xFFFFFFFF));

		mali_mmu_pagedir_update(pagedir, virt, phys, size, prop);

		MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x, gpu virtual address: 0x%x!\n", (u32)phys, virt));
		virt += size;
	}

	return _MALI_OSK_ERR_OK;
}
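
/*
 * Remove the allocation's range from the session's Mali MMU address
 * space; the session memory lock protects the page directory update.
 */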
void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc)
{
	struct mali_session_data *session;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	mali_session_memory_unlock(session);
}
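
/*
 * Map the secure memory into user space page by page by inserting raw
 * pfns; vmf_insert_pfn() expects a VM_PFNMAP-style mapping, which the
 * driver's mmap path is expected to set up on the vma.
 */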
int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
{
	int ret = 0;
	struct scatterlist *sg;
	mali_mem_secure *secure_mem = &mem_bkend->secure_mem;
	unsigned long addr = vma->vm_start;
	int i;

	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);

	for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
		phys_addr_t phys;
		dma_addr_t dev_addr;
		u32 size, j;
		dev_addr = sg_dma_address(sg);
#if defined(CONFIG_ARM64) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
		phys = dma_to_phys(&mali_platform_device->dev, dev_addr);
#else
		phys = page_to_phys(pfn_to_page(dma_to_pfn(&mali_platform_device->dev, dev_addr)));
#endif
		size = sg_dma_len(sg);
		MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);

		/* Insert the segment into the vma one page at a time. */
		for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
			ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys));

			if (unlikely(VM_FAULT_NOPAGE != ret)) {
				return -EFAULT;
			}

			MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x, cpu virtual address: 0x%lx!\n", (u32)phys, addr));

			addr += _MALI_OSK_MALI_PAGE_SIZE;
			phys += _MALI_OSK_MALI_PAGE_SIZE;
		}
	}

	/* vmf_insert_pfn() returns VM_FAULT_NOPAGE on success; callers of this
	 * function expect 0 on success, so do not leak the VM_FAULT code. */
	return 0;
}
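
/*
 * Tear down a secure allocation: unmap it from the GPU, then release
 * the dma-buf mapping, attachment and reference in that order. Returns
 * the number of MALI_MMU_PAGE_SIZE pages released.
 */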
u32 mali_mem_secure_release(mali_mem_backend *mem_bkend)
{
	struct mali_mem_secure *mem;
	mali_mem_allocation *alloc = mem_bkend->mali_allocation;
	u32 free_pages_nr = 0;
	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);

	mem = &mem_bkend->secure_mem;
	MALI_DEBUG_ASSERT_POINTER(mem->attachment);
	MALI_DEBUG_ASSERT_POINTER(mem->buf);
	MALI_DEBUG_ASSERT_POINTER(mem->sgt);
	/* Unmap the memory from the mali virtual address space. */
	mali_mem_secure_mali_unmap(alloc);
	/* Release the dma-buf in the reverse order of the attach path. */
	mutex_lock(&mem_bkend->mutex);
	dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(mem->buf, mem->attachment);
	dma_buf_put(mem->buf);
	mutex_unlock(&mem_bkend->mutex);

	free_pages_nr = mem->count;

	return free_pages_nr;
}