// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2013-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Debugfs interface to dump the memory visible to the GPU
 */

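/*
 * Example usage (illustrative only; the exact debugfs path depends on the
 * device name and on how the per-context dentry is created, so the path
 * below is an assumption):
 *
 *   # echo 8 > /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_view  (as root)
 *   # cat /sys/kernel/debug/mali0/ctx/<tgid>_<id>/mem_view
 */
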
#include "mali_kbase_debug_mem_view.h"
#include "mali_kbase.h"

#include <linux/list.h>
#include <linux/file.h>

#if IS_ENABLED(CONFIG_DEBUG_FS)

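/*
 * SHOW_GPU_MEM_DATA - Dump one page of GPU memory as hex words
 * @type:   integer type used to read the mapped page (u8/u16/u32/u64)
 * @format: seq_printf() format string matching @type
 *
 * Expects 'm', 'cpu_addr' and 'gpu_addr' to be in scope at the expansion
 * site. Prints PAGE_SIZE bytes, one row per 16 bytes (32 bytes for u64
 * columns), each row prefixed with its GPU virtual address.
 */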
#define SHOW_GPU_MEM_DATA(type, format)                                      \
{                                                                            \
	unsigned int i, j;                                                   \
	const type *ptr = (type *)cpu_addr;                                  \
	const unsigned int col_width = sizeof(type);                         \
	const unsigned int row_width = (col_width == sizeof(u64)) ? 32 : 16; \
	const unsigned int num_cols = row_width / col_width;                 \
	for (i = 0; i < PAGE_SIZE; i += row_width) {                         \
		seq_printf(m, "%016llx:", gpu_addr + i);                     \
		for (j = 0; j < num_cols; j++)                               \
			seq_printf(m, format, ptr[j]);                       \
		ptr += num_cols;                                             \
		seq_putc(m, '\n');                                           \
	}                                                                    \
}

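/**
 * struct debug_mem_mapping - A GPU VA region snapshotted for dumping
 * @node:      links this mapping into debug_mem_data::mapping_list
 * @alloc:     physical allocation backing the region (refcounted)
 * @flags:     KBASE_REG_* flags copied from the region
 * @start_pfn: first GPU page frame number of the region
 * @nr_pages:  size of the region in pages
 */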
struct debug_mem_mapping {
	struct list_head node;

	struct kbase_mem_phy_alloc *alloc;
	unsigned long flags;

	u64 start_pfn;
	size_t nr_pages;
};

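/**
 * struct debug_mem_data - Per-open state for a mem_view read
 * @mapping_list: list of struct debug_mem_mapping built at open time
 * @kctx:         the kbase context whose memory is being dumped
 * @column_width: bytes per column (1, 2, 4 or 8) for the hex dump
 */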
struct debug_mem_data {
	struct list_head mapping_list;
	struct kbase_context *kctx;
	unsigned int column_width;
};

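/**
 * struct debug_mem_seq_off - seq_file iterator position
 * @lh:     current node in the mapping list
 * @offset: page offset within the current mapping
 */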
struct debug_mem_seq_off {
	struct list_head *lh;
	size_t offset;
};

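/*
 * Locate the mapping/page pair that corresponds to the requested seq_file
 * position and allocate an iterator for it. Returns NULL once *_pos walks
 * past the last page of the last mapping.
 */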
static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
{
	struct debug_mem_data *mem_data = m->private;
	struct debug_mem_seq_off *data;
	struct debug_mem_mapping *map;
	loff_t pos = *_pos;

	list_for_each_entry(map, &mem_data->mapping_list, node) {
		if (pos >= map->nr_pages) {
			pos -= map->nr_pages;
		} else {
			data = kmalloc(sizeof(*data), GFP_KERNEL);
			if (!data)
				return NULL;
			data->lh = &map->node;
			data->offset = pos;
			return data;
		}
	}

	/* Beyond the end */
	return NULL;
}

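/* Free the iterator allocated by debug_mem_start(). */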
static void debug_mem_stop(struct seq_file *m, void *v)
{
	kfree(v);
}

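/*
 * Advance the iterator by one page, moving to the next mapping in the list
 * when the current one is exhausted. Returns NULL (and frees the iterator)
 * after the last page of the last mapping.
 */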
static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct debug_mem_data *mem_data = m->private;
	struct debug_mem_seq_off *data = v;
	struct debug_mem_mapping *map;

	map = list_entry(data->lh, struct debug_mem_mapping, node);

	if (data->offset < map->nr_pages - 1) {
		data->offset++;
		++*pos;
		return data;
	}

	if (list_is_last(data->lh, &mem_data->mapping_list)) {
		kfree(data);
		return NULL;
	}

	data->lh = data->lh->next;
	data->offset = 0;
	++*pos;

	return data;
}

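/*
 * Dump one page of GPU memory. The backing page is temporarily mapped into
 * the kernel with vmap() (write-combined unless the region is CPU-cached)
 * and printed using the column width selected via debug_mem_write().
 */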
static int debug_mem_show(struct seq_file *m, void *v)
{
	struct debug_mem_data *mem_data = m->private;
	struct debug_mem_seq_off *data = v;
	struct debug_mem_mapping *map;
	unsigned long long gpu_addr;
	struct page *page;
	void *cpu_addr;
	pgprot_t prot = PAGE_KERNEL;

	map = list_entry(data->lh, struct debug_mem_mapping, node);

	kbase_gpu_vm_lock(mem_data->kctx);

	if (data->offset >= map->alloc->nents) {
		seq_printf(m, "%016llx: Unbacked page\n\n",
			   (map->start_pfn + data->offset) << PAGE_SHIFT);
		goto out;
	}

	if (!(map->flags & KBASE_REG_CPU_CACHED))
		prot = pgprot_writecombine(prot);

	page = as_page(map->alloc->pages[data->offset]);
	cpu_addr = vmap(&page, 1, VM_MAP, prot);
	if (!cpu_addr)
		goto out;

	gpu_addr = (map->start_pfn + data->offset) << PAGE_SHIFT;

	/* Cases for the 4 supported values of column_width for showing
	 * the GPU memory contents.
	 */
	switch (mem_data->column_width) {
	case 1:
		SHOW_GPU_MEM_DATA(u8, " %02hhx");
		break;
	case 2:
		SHOW_GPU_MEM_DATA(u16, " %04hx");
		break;
	case 4:
		SHOW_GPU_MEM_DATA(u32, " %08x");
		break;
	case 8:
		SHOW_GPU_MEM_DATA(u64, " %016llx");
		break;
	default:
		dev_warn(mem_data->kctx->kbdev->dev, "Unexpected column width");
	}

	vunmap(cpu_addr);

	seq_putc(m, '\n');

out:
	kbase_gpu_vm_unlock(mem_data->kctx);
	return 0;
}

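/* seq_file iterator that walks the snapshot one GPU page at a time. */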
static const struct seq_operations ops = {
	.start = debug_mem_start,
	.next = debug_mem_next,
	.stop = debug_mem_stop,
	.show = debug_mem_show,
};

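/*
 * Snapshot every region in one zone's rbtree into the mapping list, taking
 * a reference on each backing allocation so the pages stay valid while the
 * file is open. Regions without a GPU allocation and protected-mode regions
 * (which the CPU must not read) are skipped.
 */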
static int debug_mem_zone_open(struct rb_root *rbtree,
			       struct debug_mem_data *mem_data)
{
	int ret = 0;
	struct rb_node *p;
	struct kbase_va_region *reg;
	struct debug_mem_mapping *mapping;

	for (p = rb_first(rbtree); p; p = rb_next(p)) {
		reg = rb_entry(p, struct kbase_va_region, rblink);

		if (reg->gpu_alloc == NULL)
			/* Empty region - ignore */
			continue;

		if (reg->flags & KBASE_REG_PROTECTED) {
			/* CPU access to protected memory is forbidden - so
			 * skip this GPU virtual region.
			 */
			continue;
		}

		mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
		mapping->start_pfn = reg->start_pfn;
		mapping->nr_pages = reg->nr_pages;
		mapping->flags = reg->flags;
		list_add_tail(&mapping->node, &mem_data->mapping_list);
	}

out:
	return ret;
}

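/*
 * Open handler. A write-mode open only needs the context pointer (so that
 * debug_mem_write() can update the column width); a read-mode open sets up
 * the seq_file and snapshots every memory zone under the VM lock. A
 * reference is taken on the context's file for the lifetime of this file so
 * the context cannot disappear underneath us.
 */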
static int debug_mem_open(struct inode *i, struct file *file)
{
	struct kbase_context *const kctx = i->i_private;
	struct debug_mem_data *mem_data;
	int ret;

	if (get_file_rcu(kctx->filp) == 0)
		return -ENOENT;

	/* Check if file was opened in write mode. GPU memory contents
	 * are returned only when the file is not opened in write mode.
	 */
	if (file->f_mode & FMODE_WRITE) {
		file->private_data = kctx;
		return 0;
	}

	ret = seq_open(file, &ops);
	if (ret)
		goto open_fail;

	mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
	if (!mem_data) {
		ret = -ENOMEM;
		goto out;
	}

	mem_data->kctx = kctx;

	INIT_LIST_HEAD(&mem_data->mapping_list);

	kbase_gpu_vm_lock(kctx);

	mem_data->column_width = kctx->mem_view_column_width;

	ret = debug_mem_zone_open(&kctx->reg_rbtree_same, mem_data);
	if (ret != 0) {
		kbase_gpu_vm_unlock(kctx);
		goto out;
	}

	ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data);
	if (ret != 0) {
		kbase_gpu_vm_unlock(kctx);
		goto out;
	}

	ret = debug_mem_zone_open(&kctx->reg_rbtree_exec, mem_data);
	if (ret != 0) {
		kbase_gpu_vm_unlock(kctx);
		goto out;
	}

#if MALI_USE_CSF
	ret = debug_mem_zone_open(&kctx->reg_rbtree_exec_fixed, mem_data);
	if (ret != 0) {
		kbase_gpu_vm_unlock(kctx);
		goto out;
	}

	ret = debug_mem_zone_open(&kctx->reg_rbtree_fixed, mem_data);
	if (ret != 0) {
		kbase_gpu_vm_unlock(kctx);
		goto out;
	}
#endif

	kbase_gpu_vm_unlock(kctx);

	((struct seq_file *)file->private_data)->private = mem_data;

	return 0;

out:
	if (mem_data) {
		while (!list_empty(&mem_data->mapping_list)) {
			struct debug_mem_mapping *mapping;

			mapping = list_first_entry(&mem_data->mapping_list,
						   struct debug_mem_mapping, node);
			kbase_mem_phy_alloc_put(mapping->alloc);
			list_del(&mapping->node);
			kfree(mapping);
		}
		kfree(mem_data);
	}
	seq_release(i, file);
open_fail:
	fput(kctx->filp);

	return ret;
}

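/*
 * Release handler. For read-mode opens this drops the allocation references
 * and frees the snapshot built in debug_mem_open(); in both modes it drops
 * the reference taken on the context's file.
 */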
static int debug_mem_release(struct inode *inode, struct file *file)
{
	struct kbase_context *const kctx = inode->i_private;

	/* If the file wasn't opened in write mode, then release the
	 * memory allocated to show the GPU memory contents.
	 */
	if (!(file->f_mode & FMODE_WRITE)) {
		struct seq_file *sfile = file->private_data;
		struct debug_mem_data *mem_data = sfile->private;
		struct debug_mem_mapping *mapping;

		seq_release(inode, file);

		while (!list_empty(&mem_data->mapping_list)) {
			mapping = list_first_entry(&mem_data->mapping_list,
						   struct debug_mem_mapping, node);
			kbase_mem_phy_alloc_put(mapping->alloc);
			list_del(&mapping->node);
			kfree(mapping);
		}

		kfree(mem_data);
	}

	fput(kctx->filp);

	return 0;
}

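/*
 * Write handler: accepts a new column width for the hex dump. Only powers
 * of two up to 8 bytes are accepted; the value takes effect the next time
 * the file is opened for reading.
 */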
static ssize_t debug_mem_write(struct file *file, const char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	struct kbase_context *const kctx = file->private_data;
	unsigned int column_width = 0;
	int ret = 0;

	CSTD_UNUSED(ppos);

	ret = kstrtouint_from_user(ubuf, count, 0, &column_width);

	if (ret)
		return ret;
	if (!is_power_of_2(column_width)) {
		dev_dbg(kctx->kbdev->dev,
			"Column width %u not a power of 2", column_width);
		return -EINVAL;
	}
	if (column_width > 8) {
		dev_dbg(kctx->kbdev->dev,
			"Column width %u greater than 8 not supported", column_width);
		return -EINVAL;
	}

	kbase_gpu_vm_lock(kctx);
	kctx->mem_view_column_width = column_width;
	kbase_gpu_vm_unlock(kctx);

	return count;
}

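/* File operations for the "mem_view" debugfs entry. */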
static const struct file_operations kbase_debug_mem_view_fops = {
	.owner = THIS_MODULE,
	.open = debug_mem_open,
	.release = debug_mem_release,
	.read = seq_read,
	.write = debug_mem_write,
	.llseek = seq_lseek
};

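/**
 * kbase_debug_mem_view_init - Create the "mem_view" debugfs file
 * @kctx: kbase context to expose
 *
 * Reading the file dumps every page of GPU-visible memory in the context;
 * writing to it sets the column width used for subsequent dumps.
 */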
void kbase_debug_mem_view_init(struct kbase_context *const kctx)
{
	/* Caller already ensures this, but we keep the pattern for
	 * maintenance safety.
	 */
	if (WARN_ON(!kctx) ||
	    WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
		return;

	/* Default column width is 4 */
	kctx->mem_view_column_width = sizeof(u32);

	debugfs_create_file("mem_view", 0400, kctx->kctx_dentry, kctx,
			    &kbase_debug_mem_view_fops);
}

#endif