xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/mali_memory_block_alloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright (C) 2010-2017 ARM Limited. All rights reserved.
3  *
4  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6  *
7  * A copy of the licence is included with the program, and can also be obtained from Free Software
8  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
9  */
10 
11 #include "mali_kernel_common.h"
12 #include "mali_memory.h"
13 #include "mali_memory_block_alloc.h"
14 #include "mali_osk.h"
15 #include <linux/mutex.h>
16 
17 
/* Singleton allocator for the dedicated (block) memory region.
 * Set by mali_memory_core_resource_dedicated_memory(); NULL when no
 * dedicated memory bank has been registered. */
static mali_block_allocator *mali_mem_block_gobal_allocator = NULL;
19 
/* Return the physical base address of a block item.
 * The low bits of phy_addr double as a reference counter, so they are
 * masked off before the address is handed back. */
unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
{
	unsigned long addr = item->phy_addr;

	addr &= ~(MALI_BLOCK_REF_MASK);
	return addr;
}
24 
25 
/* Return the page frame number of a block item.
 * Dividing by MALI_BLOCK_SIZE discards the ref-count bits stored in the
 * low part of phy_addr as a side effect. */
unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
{
	unsigned long pfn = item->phy_addr;

	pfn /= MALI_BLOCK_SIZE;
	return pfn;
}
30 
31 
mali_mem_block_get_ref_count(mali_page_node * node)32 u32 mali_mem_block_get_ref_count(mali_page_node *node)
33 {
34 	MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
35 	return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
36 }
37 
38 
/* Increase the reference count.
 * Not atomic: the caller must hold sp_lock before calling this function.
 */
42 
mali_mem_block_add_ref(mali_page_node * node)43 u32 mali_mem_block_add_ref(mali_page_node *node)
44 {
45 	MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
46 	MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
47 	return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
48 }
49 
/* Decrease the reference count.
 * Not atomic: the caller must hold sp_lock before calling this function.
 */
mali_mem_block_dec_ref(mali_page_node * node)53 u32 mali_mem_block_dec_ref(mali_page_node *node)
54 {
55 	MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
56 	MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
57 	return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
58 }
59 
60 
mali_mem_block_allocator_create(u32 base_address,u32 size)61 static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
62 {
63 	mali_block_allocator *info;
64 	u32 usable_size;
65 	u32 num_blocks;
66 	mali_page_node *m_node;
67 	mali_block_item *mali_blk_items = NULL;
68 	int i = 0;
69 
70 	usable_size = size & ~(MALI_BLOCK_SIZE - 1);
71 	MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
72 	MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
73 	num_blocks = usable_size / MALI_BLOCK_SIZE;
74 	MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
75 
76 	if (usable_size == 0) {
77 		MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
78 		return NULL;
79 	}
80 
81 	info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
82 	if (NULL != info) {
83 		INIT_LIST_HEAD(&info->free);
84 		spin_lock_init(&info->sp_lock);
85 		info->total_num = num_blocks;
86 		mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
87 
88 		if (mali_blk_items) {
89 			info->items = mali_blk_items;
90 			/* add blocks(4k size) to free list*/
91 			for (i = 0 ; i < num_blocks ; i++) {
92 				/* add block information*/
93 				mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
94 				/* add  to free list */
95 				m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
96 				if (m_node == NULL)
97 					goto fail;
98 				_mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
99 				list_add_tail(&m_node->list, &info->free);
100 				atomic_add(1, &info->free_num);
101 			}
102 			return info;
103 		}
104 	}
105 fail:
106 	mali_mem_block_allocator_destroy();
107 	return NULL;
108 }
109 
mali_mem_block_allocator_destroy(void)110 void mali_mem_block_allocator_destroy(void)
111 {
112 	struct mali_page_node *m_page, *m_tmp;
113 	mali_block_allocator *info = mali_mem_block_gobal_allocator;
114 	MALI_DEBUG_ASSERT_POINTER(info);
115 	MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
116 
117 	if (NULL == info)
118 		return;
119 
120 	list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
121 		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
122 		list_del(&m_page->list);
123 		kfree(m_page);
124 	}
125 
126 	_mali_osk_free(info->items);
127 	_mali_osk_free(info);
128 }
129 
mali_mem_block_release(mali_mem_backend * mem_bkend)130 u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
131 {
132 	mali_mem_allocation *alloc = mem_bkend->mali_allocation;
133 	u32 free_pages_nr = 0;
134 	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
135 
136 	/* Unmap the memory from the mali virtual address space. */
137 	mali_mem_block_mali_unmap(alloc);
138 	mutex_lock(&mem_bkend->mutex);
139 	free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
140 	mutex_unlock(&mem_bkend->mutex);
141 	return free_pages_nr;
142 }
143 
144 
/* Allocate page_count blocks from the global block allocator into
 * block_mem->pfns, taking one reference on each.
 * Returns 0 on success, -1 when not enough free blocks exist.
 *
 * BUGFIX: the availability check used ">", which rejected an exact-fit
 * request (free_num == page_count) even though enough blocks were free;
 * ">=" (expressed below as the inverted "<" guard) allows it. */
int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
{
	struct mali_page_node *m_page, *m_tmp;
	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
	mali_block_allocator *info = mali_mem_block_gobal_allocator;
	MALI_DEBUG_ASSERT_POINTER(info);

	MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
	/* The caller's list starts empty. */
	INIT_LIST_HEAD(&block_mem->pfns);

	spin_lock(&info->sp_lock);
	/* Fail early unless enough free blocks exist. */
	if ((size_t)atomic_read(&info->free_num) < page_count) {
		/* can't allocate from BLOCK memory */
		spin_unlock(&info->sp_lock);
		return -1;
	}

	list_for_each_entry_safe(m_page, m_tmp, &info->free, list) {
		if (0 == page_count)
			break;
		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
		MALI_DEBUG_ASSERT(0 == mali_mem_block_get_ref_count(m_page));
		list_move(&m_page->list, &block_mem->pfns);
		block_mem->count++;
		atomic_dec(&info->free_num);
		_mali_page_node_ref(m_page);
		page_count--;
	}
	spin_unlock(&info->sp_lock);

	return 0;
}
181 
mali_mem_block_free(mali_mem_block_mem * block_mem)182 u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
183 {
184 	u32 free_pages_nr = 0;
185 
186 	free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
187 	MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
188 			     free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
189 	block_mem->count = 0;
190 	MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
191 
192 	return free_pages_nr;
193 }
194 
195 
mali_mem_block_free_list(struct list_head * list)196 u32 mali_mem_block_free_list(struct list_head *list)
197 {
198 	struct mali_page_node *m_page, *m_tmp;
199 	mali_block_allocator *info = mali_mem_block_gobal_allocator;
200 	u32 free_pages_nr = 0;
201 
202 	if (info) {
203 		spin_lock(&info->sp_lock);
204 		list_for_each_entry_safe(m_page, m_tmp , list, list) {
205 			if (1 == _mali_page_node_get_ref_count(m_page)) {
206 				free_pages_nr++;
207 			}
208 			mali_mem_block_free_node(m_page);
209 		}
210 		spin_unlock(&info->sp_lock);
211 	}
212 	return free_pages_nr;
213 }
214 
215 /* free the node,*/
mali_mem_block_free_node(struct mali_page_node * node)216 void mali_mem_block_free_node(struct mali_page_node *node)
217 {
218 	mali_block_allocator *info = mali_mem_block_gobal_allocator;
219 
220 	/* only handle BLOCK node */
221 	if (node->type == MALI_PAGE_NODE_BLOCK && info) {
222 		/*Need to make this atomic?*/
223 		if (1 == _mali_page_node_get_ref_count(node)) {
224 			/*Move to free list*/
225 			_mali_page_node_unref(node);
226 			list_move_tail(&node->list, &info->free);
227 			atomic_add(1, &info->free_num);
228 		} else {
229 			_mali_page_node_unref(node);
230 			list_del(&node->list);
231 			kfree(node);
232 		}
233 	}
234 }
235 
236 /* unref the node, but not free it */
mali_mem_block_unref_node(struct mali_page_node * node)237 _mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
238 {
239 	mali_block_allocator *info = mali_mem_block_gobal_allocator;
240 	mali_page_node *new_node;
241 
242 	/* only handle BLOCK node */
243 	if (node->type == MALI_PAGE_NODE_BLOCK && info) {
244 		/*Need to make this atomic?*/
245 		if (1 == _mali_page_node_get_ref_count(node)) {
246 			/* allocate a  new node, Add to free list, keep the old node*/
247 			_mali_page_node_unref(node);
248 			new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
249 			if (new_node) {
250 				memcpy(new_node, node, sizeof(mali_page_node));
251 				list_add(&new_node->list, &info->free);
252 				atomic_add(1, &info->free_num);
253 			} else
254 				return _MALI_OSK_ERR_FAULT;
255 
256 		} else {
257 			_mali_page_node_unref(node);
258 		}
259 	}
260 	return _MALI_OSK_ERR_OK;
261 }
262 
263 
/* Map every block in @block_mem into the session's Mali page directory,
 * starting at GPU virtual address @vaddr, page by page with properties
 * @props. Always returns 0. */
int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
{
	struct mali_page_directory *pagedir = session->page_directory;
	struct mali_page_node *m_page;
	u32 mali_addr = vaddr;

	list_for_each_entry(m_page, &block_mem->pfns, list) {
		dma_addr_t phys;

		MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == m_page->type);
		phys = _mali_page_node_get_dma_addr(m_page);
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
		/* Verify that the "physical" address is 32-bit and
		 * usable for Mali, when on a system with bus addresses
		 * wider than 32-bit. */
		MALI_DEBUG_ASSERT(0 == (phys >> 32));
#endif
		mali_mmu_pagedir_update(pagedir, mali_addr, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, props);
		mali_addr += MALI_MMU_PAGE_SIZE;
	}

	return 0;
}
287 
/* Remove the allocation's mapping from the Mali virtual address space,
 * holding the session memory lock for the duration. */
void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
{
	struct mali_session_data *session;

	MALI_DEBUG_ASSERT_POINTER(alloc);
	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);

	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize,
			       alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	mali_session_memory_unlock(session);
}
300 
301 
/* Map the backend's blocks into the user VMA, one pfn per page.
 * Returns 0 on success, -EFAULT if any insertion fails.
 *
 * BUGFIX: vmf_insert_pfn() returns a vm_fault_t whose SUCCESS value is
 * VM_FAULT_NOPAGE (non-zero) — unlike the old vm_insert_pfn(), which
 * returned 0. The previous "0 != ret" check therefore treated every
 * successful insertion as a failure. Test the error bits instead. */
int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
{
	vm_fault_t ret;
	mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
	unsigned long addr = vma->vm_start;
	struct mali_page_node *m_page;
	MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);

	list_for_each_entry(m_page, &block_mem->pfns, list) {
		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));

		if (unlikely(ret & VM_FAULT_ERROR)) {
			return -EFAULT;
		}
		addr += _MALI_OSK_MALI_PAGE_SIZE;
	}

	return 0;
}
323 
324 
mali_memory_core_resource_dedicated_memory(u32 start,u32 size)325 _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
326 {
327 	mali_block_allocator *allocator;
328 
329 	/* Do the low level linux operation first */
330 
331 	/* Request ownership of the memory */
332 	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
333 		MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
334 		return _MALI_OSK_ERR_FAULT;
335 	}
336 
337 	/* Create generic block allocator object to handle it */
338 	allocator = mali_mem_block_allocator_create(start, size);
339 
340 	if (NULL == allocator) {
341 		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
342 		_mali_osk_mem_unreqregion(start, size);
343 		MALI_ERROR(_MALI_OSK_ERR_FAULT);
344 	}
345 
346 	mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator;
347 
348 	return _MALI_OSK_ERR_OK;
349 }
350 
mali_memory_have_dedicated_memory(void)351 mali_bool mali_memory_have_dedicated_memory(void)
352 {
353 	return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE;
354 }
355 
mali_mem_block_allocator_stat(void)356 u32 mali_mem_block_allocator_stat(void)
357 {
358 	mali_block_allocator *allocator = mali_mem_block_gobal_allocator;
359 	MALI_DEBUG_ASSERT_POINTER(allocator);
360 
361 	return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
362 }
363