/*
 * Copyright (C) 2013-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#ifdef CONFIG_ARM
#include <asm/outercache.h>
#endif
#include <asm/dma-mapping.h>

#include "mali_memory.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_kernel_linux.h"
#include "mali_memory_defer_bind.h"
#include "mali_executor.h"
#include "mali_scheduler.h"
#include "mali_gp_job.h"

mali_defer_bind_manager *mali_dmem_man = NULL;

/* Number of pages required for the GP job's varying (deferred) memory. */
static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)
{
	return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;
}

_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)
{
	mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager));
	if (!mali_dmem_man)
		return _MALI_OSK_ERR_NOMEM;

	atomic_set(&mali_dmem_man->num_used_pages, 0);
	atomic_set(&mali_dmem_man->num_dmem, 0);

	return _MALI_OSK_ERR_OK;
}

void mali_mem_defer_bind_manager_destory(void)
{
	if (mali_dmem_man) {
		MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem));
		/* Free via the OSK layer to pair with the _mali_osk_calloc()
		 * in init, instead of calling kfree() directly. */
		_mali_osk_free(mali_dmem_man);
	}
	mali_dmem_man = NULL;
}

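/*
 * Usage sketch (illustrative only): the init/destroy pair above is expected
 * to bracket the lifetime of the deferred-bind manager, typically from the
 * driver's module load/terminate paths. The exact call sites are an
 * assumption, not taken from this file:
 *
 *	if (_MALI_OSK_ERR_OK != mali_mem_defer_bind_manager_init())
 *		return -ENOMEM;
 *	...
 *	mali_mem_defer_bind_manager_destory();
 */
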
/* Allocate pages from OS memory. */
_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock)
{
	int retval = 0;
	u32 num_pages = require;
	mali_mem_os_mem os_mem;

	retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE);

	/* add to free pages list */
	if (0 == retval) {
		MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem: pages allocated = 0x%x\n", num_pages));
		list_splice(&os_mem.pages, &dblock->free_pages);
		atomic_add(os_mem.count, &dblock->num_free_pages);
		atomic_add(os_mem.count, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}
		return _MALI_OSK_ERR_OK;
	} else {
		return _MALI_OSK_ERR_FAULT;
	}
}

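/*
 * A minimal caller-side sketch, under the assumption that the owner of a
 * mali_defer_mem_block initialises it before pages are added (this file
 * only consumes an already set-up block; the field types follow from the
 * list_splice()/atomic_add() calls above):
 *
 *	mali_defer_mem_block dblock;
 *
 *	INIT_LIST_HEAD(&dblock.free_pages);
 *	atomic_set(&dblock.num_free_pages, 0);
 *	mali_mem_defer_alloc_mem(num_pages, session, &dblock);
 */
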
_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock)
{
	u32 require_page;

	if (!next_gp_job)
		return _MALI_OSK_ERR_FAULT;

	require_page = mali_dmem_get_gp_varying_size(next_gp_job);

	MALI_DEBUG_PRINT(4, ("mali_mem_prepare_mem_for_job: require alloc page 0x%x\n",
			     require_page));
	/* allocate more pages from OS */
	if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) {
		MALI_DEBUG_PRINT(1, ("ERROR: mali_mem_prepare_mem_for_job, page allocation failed!\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED;

	return _MALI_OSK_ERR_OK;
}


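/*
 * Usage sketch (illustrative, not taken from this file): before a GP job
 * that uses deferred varying memory is started, a caller such as the
 * scheduler is expected to top up the free-page block so that
 * mali_mem_defer_bind() cannot run dry:
 *
 *	if (_MALI_OSK_ERR_OK != mali_mem_prepare_mem_for_job(gp_job, &dblock))
 *		... fail the job ...
 *	mali_mem_defer_bind(gp_job, &dblock);
 */
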
/* Do preparation for an allocation before defer bind. */
_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize)
{
	mali_mem_backend *mem_bkend = NULL;
	struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));
	if (NULL == bk_list)
		return _MALI_OSK_ERR_FAULT;

	INIT_LIST_HEAD(&bk_list->node);
	/* Get backend memory */
	mutex_lock(&mali_idr_mutex);
	if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
		MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));
		mutex_unlock(&mali_idr_mutex);
		_mali_osk_free(bk_list);
		return _MALI_OSK_ERR_FAULT;
	}
	mutex_unlock(&mali_idr_mutex);

	/* If the mem backend has already been bound, no need to bind again. */
	if (mem_bkend->os_mem.count > 0) {
		_mali_osk_free(bk_list);
		return _MALI_OSK_ERR_OK;
	}

	MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%p vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));

	INIT_LIST_HEAD(&mem_bkend->os_mem.pages);

	bk_list->bkend = mem_bkend;
	bk_list->vaddr = alloc->mali_vma_node.vm_node.start;
	bk_list->session = alloc->session;
	bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;
	*required_varying_memsize += mem_bkend->size;
	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);

	/* add to job to-do list */
	list_add(&bk_list->node, list);

	return _MALI_OSK_ERR_OK;
}


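/*
 * Flow note: the prepare step above only records work. Each allocation that
 * still needs pages gets a mali_backend_bind_list node queued on the list
 * passed in (in practice the GP job's vary_todo list, judging by the loop
 * in mali_mem_defer_bind() below), and required_varying_memsize accumulates
 * the total so the right number of pages can be allocated up front. The
 * actual MMU mapping happens later in mali_mem_defer_bind_allocation().
 */
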
/* Bind physical memory to an allocation.
 * This function will be called in the IRQ handler. */
static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node,
		struct list_head *pages)
{
	struct mali_session_data *session = bk_node->session;
	mali_mem_backend *mem_bkend = bk_node->bkend;
	MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %p page num=0x%x vaddr=%x session=%p\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session));

	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
	list_splice(pages, &mem_bkend->os_mem.pages);
	mem_bkend->os_mem.count = bk_node->page_num;

	if (mem_bkend->type == MALI_MEM_OS) {
		mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0,
				     mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT);
	}
	/* Make sure the page list and MMU mapping above are visible to other
	 * CPUs before the "binded" flags below can be observed. */
	smp_wmb();
	bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED;
	mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED;
	mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED;
	return _MALI_OSK_ERR_OK;
}


/* Move count page nodes from the block's free list onto pages.
 * Returns pages on success, or NULL if the block does not hold enough pages. */
static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock)
{
	u32 i = 0;
	struct mali_page_node *m_page, *m_tmp;

	if (atomic_read(&dblock->num_free_pages) < count) {
		return NULL;
	} else {
		list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) {
			if (i < count) {
				list_move_tail(&m_page->list, pages);
			} else {
				break;
			}
			i++;
		}
		MALI_DEBUG_ASSERT(i == count);
		atomic_sub(count, &dblock->num_free_pages);
		return pages;
	}
}


/* Called in the job-start IOCTL to bind physical memory for each allocation.
 * @gp          GP job whose vary_todo backend list should be defer-bound
 * @dmem_block  block holding the pre-allocated pages used for the bind
 */
_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp,
					struct mali_defer_mem_block *dmem_block)
{
	struct mali_defer_mem *dmem = NULL;
	struct mali_backend_bind_list *bkn, *bkn_tmp;
	LIST_HEAD(pages);

	if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) {
		MALI_DEBUG_PRINT_ERROR(("#BIND: The size of the varying buffer does not match the pages of the dmem_block!\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	MALI_DEBUG_PRINT(4, ("#BIND: GP job=%p\n", gp));
	dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));
	if (dmem) {
		INIT_LIST_HEAD(&dmem->node);
		gp->dmem = dmem;
	} else {
		return _MALI_OSK_ERR_NOMEM;
	}

	atomic_add(1, &mali_dmem_man->num_dmem);
	/* for each backend on the bind list, do the bind */
	list_for_each_entry_safe(bkn, bkn_tmp, &gp->vary_todo, node) {
		INIT_LIST_HEAD(&pages);
		if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) {
			list_del(&bkn->node);
			mali_mem_defer_bind_allocation(bkn, &pages);
			_mali_osk_free(bkn);
		} else {
			/* Not enough memory; should never happen, since the size
			 * check above guarantees enough free pages. Undo the dmem
			 * bookkeeping so a later mali_mem_defer_dmem_free() does
			 * not decrement again or double-free. */
			MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binding!\n"));
			atomic_dec(&mali_dmem_man->num_dmem);
			_mali_osk_free(gp->dmem);
			gp->dmem = NULL;
			return _MALI_OSK_ERR_NOMEM;
		}
	}

	if (!list_empty(&gp->vary_todo)) {
		MALI_DEBUG_PRINT_ERROR(("#BIND: The defer-bind backend list isn't empty!\n"));
		atomic_dec(&mali_dmem_man->num_dmem);
		_mali_osk_free(gp->dmem);
		gp->dmem = NULL;
		return _MALI_OSK_ERR_FAULT;
	}

	dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;

	return _MALI_OSK_ERR_OK;
}

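/*
 * End-to-end sketch of the deferred-bind path (illustrative; the call sites
 * and error handling are assumptions, not taken from this file):
 *
 *	u32 varying_size = 0;
 *
 *	// at allocation time, queue each varying allocation for deferred bind
 *	mali_mem_defer_bind_allocation_prepare(alloc, &gp->vary_todo, &varying_size);
 *
 *	// before job start, allocate exactly the pages the job needs ...
 *	mali_mem_prepare_mem_for_job(gp, &dblock);
 *
 *	// ... then hand them out to every queued backend and map them
 *	mali_mem_defer_bind(gp, &dblock);
 *
 *	// when the job is done, release the bookkeeping object
 *	mali_mem_defer_dmem_free(gp);
 */
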
void mali_mem_defer_dmem_free(struct mali_gp_job *gp)
{
	if (gp->dmem) {
		atomic_dec(&mali_dmem_man->num_dmem);
		_mali_osk_free(gp->dmem);
		/* Clear the pointer so a repeated call cannot double-free. */
		gp->dmem = NULL;
	}
}