// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/memory_group_manager.h>

#include <mali_kbase.h>
#include <mali_kbase_native_mgm.h>

/**
 * kbase_native_mgm_alloc - Native physical memory allocation method
 *
 * @mgm_dev:  The memory group manager the request is being made through.
 * @group_id: A physical memory group ID, which must be valid but is not used.
 *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
 * @gfp_mask: Bitmask of Get Free Page flags affecting allocator behavior.
 * @order:    Page order for physical page size (order=0 means 4 KiB,
 *            order=9 means 2 MiB).
 *
 * Delegates all memory allocation requests to the kernel's alloc_pages
 * function.
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 */
static struct page *kbase_native_mgm_alloc(
	struct memory_group_manager_device *mgm_dev, int group_id,
	gfp_t gfp_mask, unsigned int order)
{
	/*
	 * Check that the base and the mgm defines, from separate header files,
	 * for the max number of memory groups are compatible.
	 */
	BUILD_BUG_ON(BASE_MEM_GROUP_COUNT != MEMORY_GROUP_MANAGER_NR_GROUPS);
	/*
	 * Check that the mask used for storing the memory group ID is big
	 * enough for the largest possible memory group ID.
	 */
	BUILD_BUG_ON((BASEP_CONTEXT_MMU_GROUP_ID_MASK
				>> BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
			< (BASE_MEM_GROUP_COUNT - 1));

	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(group_id);

	return alloc_pages(gfp_mask, order);
}

/**
 * kbase_native_mgm_free - Native physical memory freeing method
 *
 * @mgm_dev:  The memory group manager the request is being made through.
 * @group_id: A physical memory group ID, which must be valid but is not used.
 *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
 * @page:     Address of the struct associated with a page of physical
 *            memory that was allocated by calling kbase_native_mgm_alloc
 *            with the same argument values.
 * @order:    Page order for physical page size (order=0 means 4 KiB,
 *            order=9 means 2 MiB).
 *
 * Delegates all memory freeing requests to the kernel's __free_pages function.
 */
static void kbase_native_mgm_free(struct memory_group_manager_device *mgm_dev,
	int group_id, struct page *page, unsigned int order)
{
	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(group_id);

	__free_pages(page, order);
}

/**
 * kbase_native_mgm_vmf_insert_pfn_prot - Native method to map a page on the CPU
 *
 * @mgm_dev:  The memory group manager the request is being made through.
 * @group_id: A physical memory group ID, which must be valid but is not used.
 *            Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
 * @vma:      The virtual memory area to insert the page into.
 * @addr:     An address contained in @vma to assign to the inserted page.
 * @pfn:      The kernel Page Frame Number to insert at @addr in @vma.
 * @pgprot:   Protection flags for the inserted page.
 *
 * Called from a CPU virtual memory page fault handler. Delegates all memory
 * mapping requests to the kernel's vmf_insert_pfn_prot function.
 *
 * Return: Type of fault that occurred or VM_FAULT_NOPAGE if the page table
 *         entry was successfully installed.
 */
static vm_fault_t kbase_native_mgm_vmf_insert_pfn_prot(
		struct memory_group_manager_device *mgm_dev, int group_id,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, pgprot_t pgprot)
{
	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(group_id);

	return vmf_insert_pfn_prot(vma, addr, pfn, pgprot);
}
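
/*
 * Illustrative sketch (not part of the original file): the kerneldoc above
 * notes that this op is reached from a CPU page fault handler. One plausible
 * call pattern, with a hypothetical handler name, the group ID fixed at 0 and
 * the PFN lookup left as a placeholder, would look roughly like this:
 *
 *	static vm_fault_t example_cpu_page_fault(struct vm_fault *vmf)
 *	{
 *		struct memory_group_manager_device *mgm_dev = &kbase_native_mgm_dev;
 *		unsigned long pfn = ...;	// PFN of the backing physical page
 *
 *		return mgm_dev->ops.mgm_vmf_insert_pfn_prot(mgm_dev, 0,
 *				vmf->vma, vmf->address, pfn,
 *				vmf->vma->vm_page_prot);
 *	}
 */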

/**
 * kbase_native_mgm_update_gpu_pte - Native method to modify a GPU page table
 *                                   entry
 *
 * @mgm_dev:   The memory group manager the request is being made through.
 * @group_id:  A physical memory group ID, which must be valid but is not used.
 *             Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
 * @mmu_level: The level of the MMU page table where the page is getting mapped.
 * @pte:       The prepared page table entry.
 *
 * This function simply returns the @pte without modification.
 *
 * Return: A GPU page table entry to be stored in a page table.
 */
static u64
kbase_native_mgm_update_gpu_pte(struct memory_group_manager_device *mgm_dev,
				int group_id, int mmu_level, u64 pte)
{
	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(group_id);
	CSTD_UNUSED(mmu_level);

	return pte;
}

/**
 * kbase_native_mgm_pte_to_original_pte - Native method to undo changes done in
 *                                        kbase_native_mgm_update_gpu_pte()
 *
 * @mgm_dev:   The memory group manager the request is being made through.
 * @group_id:  A physical memory group ID, which must be valid but is not used.
 *             Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
 * @mmu_level: The level of the MMU page table where the page is getting mapped.
 * @pte:       The prepared page table entry.
 *
 * This function simply returns the @pte without modification.
 *
 * Return: A GPU page table entry to be stored in a page table.
 */
static u64 kbase_native_mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev,
						int group_id, int mmu_level, u64 pte)
{
	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(group_id);
	CSTD_UNUSED(mmu_level);

	return pte;
}

struct memory_group_manager_device kbase_native_mgm_dev = {
	.ops = {
		.mgm_alloc_page = kbase_native_mgm_alloc,
		.mgm_free_page = kbase_native_mgm_free,
		.mgm_get_import_memory_id = NULL,
		.mgm_vmf_insert_pfn_prot = kbase_native_mgm_vmf_insert_pfn_prot,
		.mgm_update_gpu_pte = kbase_native_mgm_update_gpu_pte,
		.mgm_pte_to_original_pte = kbase_native_mgm_pte_to_original_pte,
	},
	.data = NULL
};
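
/*
 * Illustrative sketch (not part of the original file): one possible way a
 * caller could exercise the native manager through its ops table. The function
 * name is hypothetical; group ID 0, order 0 (a single small page), MMU level 3
 * and the dummy PTE value are arbitrary choices made only for illustration.
 */
static void __maybe_unused kbase_native_mgm_example_usage(void)
{
	struct memory_group_manager_device *mgm_dev = &kbase_native_mgm_dev;
	struct page *page;

	/* Allocate one page from (unused) group 0 */
	page = mgm_dev->ops.mgm_alloc_page(mgm_dev, 0, GFP_KERNEL, 0);
	if (!page)
		return;

	/* The native manager returns GPU PTEs unchanged */
	WARN_ON(mgm_dev->ops.mgm_update_gpu_pte(mgm_dev, 0, 3, 0x1234) != 0x1234);

	/* Free with the same group ID and order used for the allocation */
	mgm_dev->ops.mgm_free_page(mgm_dev, 0, page, 0);
}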