// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_gwt.h"
#include <linux/list_sort.h>

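/**
 * kbase_gpu_gwt_setup_page_permission - Walk a region rbtree and update the
 *                                       GPU page permissions of each region.
 * @kctx: Kbase context.
 * @flag: Mask applied to each region's flags for the MMU update; pass
 *        ~KBASE_REG_GPU_WR to strip write permission, or ~0UL to leave
 *        all flags intact.
 * @node: First rbtree node to process; iteration continues via rb_next().
 *
 * Only regions that have pages allocated, are valid and are currently
 * GPU-writable are updated.
 */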
static inline void kbase_gpu_gwt_setup_page_permission(
				struct kbase_context *kctx,
				unsigned long flag,
				struct rb_node *node)
{
	struct rb_node *rbnode = node;

	while (rbnode) {
		struct kbase_va_region *reg;
		int err = 0;

		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
		if (reg->nr_pages && !kbase_is_region_invalid_or_free(reg) &&
		    (reg->flags & KBASE_REG_GPU_WR)) {
			err = kbase_mmu_update_pages(kctx, reg->start_pfn,
					kbase_get_gpu_phy_pages(reg),
					reg->gpu_alloc->nents,
					reg->flags & flag,
					reg->gpu_alloc->group_id);
			if (err)
				dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n");
		}

		rbnode = rb_next(rbnode);
	}
}

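/*
 * Apply the permission mask to every region in both of the context's
 * region rbtrees (the same-VA and custom-VA zones).
 */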
static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx,
					unsigned long flag)
{
	kbase_gpu_gwt_setup_page_permission(kctx, flag,
					rb_first(&(kctx->reg_rbtree_same)));
	kbase_gpu_gwt_setup_page_permission(kctx, flag,
					rb_first(&(kctx->reg_rbtree_custom)));
}

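/**
 * kbase_gpu_gwt_start - Enable GPU write tracking for a context.
 * @kctx: Kbase context.
 *
 * Makes all currently GPU-writable pages read-only so that the first write
 * to each page raises a fault that can be recorded.
 *
 * Return: 0 on success, or -EBUSY if tracking is already enabled.
 */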
int kbase_gpu_gwt_start(struct kbase_context *kctx)
{
	kbase_gpu_vm_lock(kctx);
	if (kctx->gwt_enabled) {
		kbase_gpu_vm_unlock(kctx);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&kctx->gwt_current_list);
	INIT_LIST_HEAD(&kctx->gwt_snapshot_list);

#if !MALI_USE_CSF
	/* If GWT is enabled using the new vector dumping format from user
	 * space, back up the status of the job serialization flag and use
	 * full serialization of jobs while dumping. The original status is
	 * restored when dumping ends, in kbase_gpu_gwt_stop().
	 */
	kctx->kbdev->backup_serialize_jobs = kctx->kbdev->serialize_jobs;
	kctx->kbdev->serialize_jobs = KBASE_SERIALIZE_INTRA_SLOT |
						KBASE_SERIALIZE_INTER_SLOT;

#endif
	/* Mark GWT as enabled before making the pages read-only, in case a
	 * write page fault is triggered while the pages are still being
	 * updated below. (kbase_gpu_vm_lock() doesn't prevent this!)
	 */
	kctx->gwt_enabled = true;
	kctx->gwt_was_enabled = true;

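	/* Masking with ~KBASE_REG_GPU_WR clears the GPU-write bit on every
	 * tracked region, so the next GPU write to any of its pages faults
	 * and gets recorded.
	 */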
	kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);

	kbase_gpu_vm_unlock(kctx);
	return 0;
}

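/**
 * kbase_gpu_gwt_stop - Disable GPU write tracking for a context.
 * @kctx: Kbase context.
 *
 * Frees any collected write-fault records and restores the regions'
 * original page permissions.
 *
 * Return: 0 on success, or -EINVAL if tracking is not enabled.
 */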
int kbase_gpu_gwt_stop(struct kbase_context *kctx)
{
	struct kbasep_gwt_list_element *pos, *n;

	kbase_gpu_vm_lock(kctx);
	if (!kctx->gwt_enabled) {
		kbase_gpu_vm_unlock(kctx);
		return -EINVAL;
	}

	list_for_each_entry_safe(pos, n, &kctx->gwt_current_list, link) {
		list_del(&pos->link);
		kfree(pos);
	}

	list_for_each_entry_safe(pos, n, &kctx->gwt_snapshot_list, link) {
		list_del(&pos->link);
		kfree(pos);
	}

#if !MALI_USE_CSF
	kctx->kbdev->serialize_jobs = kctx->kbdev->backup_serialize_jobs;
#endif

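	/* ~0UL keeps every flag bit set, so regions that were made read-only
	 * for tracking get their GPU-write permission back.
	 */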
	kbase_gpu_gwt_setup_pages(kctx, ~0UL);

	kctx->gwt_enabled = false;
	kbase_gpu_vm_unlock(kctx);
	return 0;
}

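/*
 * Comparison callback for list_sort(): orders GWT elements by ascending
 * page address. It never returns 0; list_sort() only checks for a result
 * greater than zero, and the sort is stable, so equal addresses keep
 * their relative order.
 */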
#if (KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE)
static int list_cmp_function(void *priv, const struct list_head *a,
			     const struct list_head *b)
#else
static int list_cmp_function(void *priv, struct list_head *a,
			     struct list_head *b)
#endif
{
	const struct kbasep_gwt_list_element *elementA =
		container_of(a, struct kbasep_gwt_list_element, link);
	const struct kbasep_gwt_list_element *elementB =
		container_of(b, struct kbasep_gwt_list_element, link);

	CSTD_UNUSED(priv);

	if (elementA->page_addr > elementB->page_addr)
		return 1;
	return -1;
}

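/*
 * Sort the snapshot list by page address, then merge entries that belong
 * to the same region and cover contiguous page ranges, freeing the
 * entries that are merged away.
 */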
static void kbase_gpu_gwt_collate(struct kbase_context *kctx,
		struct list_head *snapshot_list)
{
	struct kbasep_gwt_list_element *pos, *n;
	struct kbasep_gwt_list_element *collated = NULL;

	/* Sort the list */
	list_sort(NULL, snapshot_list, list_cmp_function);

	/* Combine contiguous areas. */
	list_for_each_entry_safe(pos, n, snapshot_list, link) {
		if (collated == NULL || collated->region != pos->region ||
				(collated->page_addr +
					(collated->num_pages * PAGE_SIZE)) !=
							pos->page_addr) {
			/* First time through, a new region, or a range that
			 * is not contiguous with the previous one - start
			 * collating into this element.
			 */
			collated = pos;
		} else {
			/* Contiguous, so merge into the current element. */
			collated->num_pages += pos->num_pages;
			/* Remove the merged element from the list. */
			list_del(&pos->link);
			kfree(pos);
		}
	}
}

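/**
 * kbase_gpu_gwt_dump - Copy the collected write-fault addresses to user space.
 * @kctx:     Kbase context.
 * @gwt_dump: Ioctl union; in.addr_buffer and in.size_buffer point to user
 *            buffers of in.len u64 entries each. On return,
 *            out.no_of_addr_collected holds the number of entries written
 *            and out.more_data_available is set if the buffers were too
 *            small to hold everything.
 *
 * Return: 0 on success, -EPERM if tracking is disabled, -EINVAL if no
 * valid user buffers were supplied, or -EFAULT on a copy failure.
 */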
int kbase_gpu_gwt_dump(struct kbase_context *kctx,
			union kbase_ioctl_cinstr_gwt_dump *gwt_dump)
{
	const u32 ubuf_size = gwt_dump->in.len;
	u32 ubuf_count = 0;
	void __user *user_addr = (void __user *)
			(uintptr_t)gwt_dump->in.addr_buffer;
	void __user *user_sizes = (void __user *)
			(uintptr_t)gwt_dump->in.size_buffer;

	kbase_gpu_vm_lock(kctx);

	if (!kctx->gwt_enabled) {
		kbase_gpu_vm_unlock(kctx);
		/* gwt_dump shouldn't be called when gwt is disabled */
		return -EPERM;
	}

	if (!gwt_dump->in.len || !gwt_dump->in.addr_buffer
			|| !gwt_dump->in.size_buffer) {
		kbase_gpu_vm_unlock(kctx);
		/* We don't have any valid user space buffers to copy the
		 * write-modified addresses into.
		 */
		return -EINVAL;
	}

	if (list_empty(&kctx->gwt_snapshot_list) &&
			!list_empty(&kctx->gwt_current_list)) {
		list_replace_init(&kctx->gwt_current_list,
					&kctx->gwt_snapshot_list);

		/* We have collected all write faults so far
		 * and they will be passed on to user space.
		 * Reset the page flags state to allow collection of
		 * further write faults.
		 */
		kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);

		/* Sort and combine consecutive pages in the dump list. */
		kbase_gpu_gwt_collate(kctx, &kctx->gwt_snapshot_list);
	}

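	/* Drain the snapshot list in batches of up to 32 entries, stopping
	 * early once the user-supplied buffers are full.
	 */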
	while (!list_empty(&kctx->gwt_snapshot_list)) {
		u64 addr_buffer[32];
		u64 num_page_buffer[32];
		u32 count = 0;
		int err;
		struct kbasep_gwt_list_element *dump_info, *n;

		list_for_each_entry_safe(dump_info, n,
				&kctx->gwt_snapshot_list, link) {
			addr_buffer[count] = dump_info->page_addr;
			num_page_buffer[count] = dump_info->num_pages;
			count++;
			list_del(&dump_info->link);
			kfree(dump_info);
			if (ARRAY_SIZE(addr_buffer) == count ||
					ubuf_size == (ubuf_count + count))
				break;
		}

		if (count) {
			/* copy_to_user() returns the number of bytes it
			 * could not copy, not an error code, so map any
			 * failure to -EFAULT.
			 */
			err = copy_to_user((user_addr +
					(ubuf_count * sizeof(u64))),
					(void *)addr_buffer,
					count * sizeof(u64));
			if (err) {
				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
				kbase_gpu_vm_unlock(kctx);
				return -EFAULT;
			}
			err = copy_to_user((user_sizes +
					(ubuf_count * sizeof(u64))),
					(void *)num_page_buffer,
					count * sizeof(u64));
			if (err) {
				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
				kbase_gpu_vm_unlock(kctx);
				return -EFAULT;
			}

			ubuf_count += count;
		}

		if (ubuf_count == ubuf_size)
			break;
	}

	if (!list_empty(&kctx->gwt_snapshot_list))
		gwt_dump->out.more_data_available = 1;
	else
		gwt_dump->out.more_data_available = 0;

	gwt_dump->out.no_of_addr_collected = ubuf_count;
	kbase_gpu_vm_unlock(kctx);
	return 0;
}