xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_trace_buffer.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2018-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include "mali_kbase.h"
23 #include "mali_kbase_defs.h"
24 #include "mali_kbase_csf_firmware.h"
25 #include "mali_kbase_csf_trace_buffer.h"
26 #include "mali_kbase_reset_gpu.h"
27 #include "mali_kbase_csf_tl_reader.h"
28 
29 #include <linux/list.h>
30 #include <linux/mman.h>
31 
32 /**
33  * struct firmware_trace_buffer - Trace Buffer within the MCU firmware
34  *
35  * @kbdev:        Pointer to the Kbase device.
36  * @node:         List head linking all trace buffers to
37  *                kbase_device:csf.firmware_trace_buffers
38  * @data_mapping: MCU shared memory mapping used for the data buffer.
39  * @updatable:    Indicates whether config items can be updated with
40  *                FIRMWARE_CONFIG_UPDATE
41  * @type:         The type of the trace buffer.
42  * @trace_enable_entry_count: Number of Trace Enable bits.
43  * @gpu_va:                 Structure containing all the Firmware addresses
44  *                          that are accessed by the MCU.
45  * @gpu_va.size_address:    The address where the MCU shall read the size of
46  *                          the data buffer.
47  * @gpu_va.insert_address:  The address that shall be dereferenced by the MCU
48  *                          to write the Insert offset.
49  * @gpu_va.extract_address: The address that shall be dereferenced by the MCU
50  *                          to read the Extract offset.
51  * @gpu_va.data_address:    The address that shall be dereferenced by the MCU
52  *                          to write the Trace Buffer.
53  * @gpu_va.trace_enable:    The address where the MCU shall read the array of
54  *                          Trace Enable bits describing which trace points
55  *                          and features shall be enabled.
56  * @cpu_va:                 Structure containing CPU addresses of variables
57  *                          which are permanently mapped on the CPU address
58  *                          space.
59  * @cpu_va.insert_cpu_va:   CPU virtual address of the Insert variable.
60  * @cpu_va.extract_cpu_va:  CPU virtual address of the Extract variable.
61  * @num_pages: Size of the data buffer, in pages.
62  * @trace_enable_init_mask: Initial value for the trace enable bit mask.
63  * @name:  NULL terminated string which contains the name of the trace buffer.
64  *
65  * The firmware relays information to the host by writing on memory buffers
66  * which are allocated and partially configured by the host. These buffers
67  * are called Trace Buffers: each of them has a specific purpose and is
68  * identified by a name and a set of memory addresses where the host can
69  * set pointers to host-allocated structures.
70  */
71 struct firmware_trace_buffer {
72 	struct kbase_device *kbdev;
73 	struct list_head node;
74 	struct kbase_csf_mapping data_mapping;
75 	bool updatable;
76 	u32 type;
77 	u32 trace_enable_entry_count;
78 	struct gpu_va {
79 		u32 size_address;
80 		u32 insert_address;
81 		u32 extract_address;
82 		u32 data_address;
83 		u32 trace_enable;
84 	} gpu_va;
85 	struct cpu_va {
86 		u32 *insert_cpu_va;
87 		u32 *extract_cpu_va;
88 	} cpu_va;
89 	u32 num_pages;
90 	u32 trace_enable_init_mask[CSF_FIRMWARE_TRACE_ENABLE_INIT_MASK_MAX];
91 	char name[1]; /* this field must be last */
92 };
93 
94 /**
95  * struct firmware_trace_buffer_data - Configuration data for trace buffers
96  *
97  * @name: Name identifier of the trace buffer
98  * @trace_enable_init_mask: Initial value to assign to the trace enable bits
99  * @size: Size of the data buffer to allocate for the trace buffer, in pages.
100  *        The size of a data buffer must always be a power of 2.
101  *
102  * Describe how to set up a trace buffer interface.
103  * Trace buffers are identified by name and they require a data buffer and
104  * an initial mask of values for the trace enable bits.
105  */
struct firmware_trace_buffer_data {
	char name[64]; /* Name identifier of the trace buffer */
	/* Initial value to assign to the trace enable bits */
	u32 trace_enable_init_mask[CSF_FIRMWARE_TRACE_ENABLE_INIT_MASK_MAX];
	size_t size; /* Data buffer size, in pages (must be a power of 2) */
};
111 
112 /*
113  * Table of configuration data for trace buffers.
114  *
115  * This table contains the configuration data for the trace buffers that are
116  * expected to be parsed from the firmware.
117  */
static const struct firmware_trace_buffer_data trace_buffer_data[] = {
#if MALI_UNIT_TEST
	/* Buffer used only by the firmware unit-test build */
	{ "fwutf", { 0 }, 1 },
#endif
	{ FIRMWARE_LOG_BUF_NAME, { 0 }, 4 },
	{ "benchmark", { 0 }, 2 },
	/* Timeline buffer is sized from the CSF timeline reader's page count */
	{ "timeline", { 0 }, KBASE_CSF_TL_BUFFER_NR_PAGES },
};
126 
int kbase_csf_firmware_trace_buffers_init(struct kbase_device *kbdev)
{
	struct firmware_trace_buffer *trace_buffer;
	int ret = 0;
	/* The Extract/Insert variables of all trace buffers are packed into
	 * two shared pages, each variable at its own cache-line-aligned
	 * offset so that firmware and host accesses do not false-share.
	 */
	u32 mcu_rw_offset = 0, mcu_write_offset = 0;
	const u32 cache_line_alignment = kbase_get_cache_line_alignment(kbdev);

	if (list_empty(&kbdev->csf.firmware_trace_buffers.list)) {
		dev_dbg(kbdev->dev, "No trace buffers to initialise\n");
		return 0;
	}

	/* GPU-readable,writable memory used for Extract variables */
	ret = kbase_csf_firmware_mcu_shared_mapping_init(
			kbdev, 1, PROT_WRITE,
			KBASE_REG_GPU_RD | KBASE_REG_GPU_WR,
			&kbdev->csf.firmware_trace_buffers.mcu_rw);
	if (ret != 0) {
		dev_err(kbdev->dev, "Failed to map GPU-rw MCU shared memory\n");
		goto out;
	}

	/* GPU-writable memory used for Insert variables */
	ret = kbase_csf_firmware_mcu_shared_mapping_init(
			kbdev, 1, PROT_READ, KBASE_REG_GPU_WR,
			&kbdev->csf.firmware_trace_buffers.mcu_write);
	if (ret != 0) {
		dev_err(kbdev->dev, "Failed to map GPU-writable MCU shared memory\n");
		goto out;
	}

	list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
		u32 extract_gpu_va, insert_gpu_va, data_buffer_gpu_va,
			trace_enable_size_dwords;
		u32 *extract_cpu_va, *insert_cpu_va;
		unsigned int i;

		/* GPU-writable data buffer for the individual trace buffer */
		ret = kbase_csf_firmware_mcu_shared_mapping_init(
				kbdev, trace_buffer->num_pages, PROT_READ, KBASE_REG_GPU_WR,
				&trace_buffer->data_mapping);
		if (ret) {
			dev_err(kbdev->dev, "Failed to map GPU-writable MCU shared memory for a trace buffer\n");
			goto out;
		}

		/* Compute GPU and CPU addresses of this buffer's Extract and
		 * Insert variables within the two shared mappings.
		 */
		extract_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_rw_offset;
		extract_cpu_va = (u32 *)(
			kbdev->csf.firmware_trace_buffers.mcu_rw.cpu_addr +
			mcu_rw_offset);
		insert_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_write_offset;
		insert_cpu_va = (u32 *)(
			kbdev->csf.firmware_trace_buffers.mcu_write.cpu_addr +
			mcu_write_offset);
		data_buffer_gpu_va =
			(trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);

		/* Initialize the Extract variable */
		*extract_cpu_va = 0;

		/* Each FW address shall be mapped and set individually, as we can't
		 * assume anything about their location in the memory address space.
		 */
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.data_address, data_buffer_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.insert_address, insert_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.extract_address, extract_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.size_address,
				trace_buffer->num_pages << PAGE_SHIFT);

		/* Round the enable-bit count up to whole 32-bit words */
		trace_enable_size_dwords =
				(trace_buffer->trace_enable_entry_count + 31) >> 5;

		for (i = 0; i < trace_enable_size_dwords; i++) {
			kbase_csf_update_firmware_memory(
					kbdev, trace_buffer->gpu_va.trace_enable + i*4,
					trace_buffer->trace_enable_init_mask[i]);
		}

		/* Store CPU virtual addresses for permanently mapped variables */
		trace_buffer->cpu_va.insert_cpu_va = insert_cpu_va;
		trace_buffer->cpu_va.extract_cpu_va = extract_cpu_va;

		/* Update offsets */
		mcu_write_offset += cache_line_alignment;
		mcu_rw_offset += cache_line_alignment;
	}

out:
	return ret;
}
225 
kbase_csf_firmware_trace_buffers_term(struct kbase_device * kbdev)226 void kbase_csf_firmware_trace_buffers_term(struct kbase_device *kbdev)
227 {
228 	if (list_empty(&kbdev->csf.firmware_trace_buffers.list))
229 		return;
230 
231 	while (!list_empty(&kbdev->csf.firmware_trace_buffers.list)) {
232 		struct firmware_trace_buffer *trace_buffer;
233 
234 		trace_buffer = list_first_entry(&kbdev->csf.firmware_trace_buffers.list,
235 				struct firmware_trace_buffer, node);
236 		kbase_csf_firmware_mcu_shared_mapping_term(kbdev, &trace_buffer->data_mapping);
237 		list_del(&trace_buffer->node);
238 
239 		kfree(trace_buffer);
240 	}
241 
242 	kbase_csf_firmware_mcu_shared_mapping_term(
243 			kbdev, &kbdev->csf.firmware_trace_buffers.mcu_rw);
244 	kbase_csf_firmware_mcu_shared_mapping_term(
245 			kbdev, &kbdev->csf.firmware_trace_buffers.mcu_write);
246 }
247 
int kbase_csf_firmware_parse_trace_buffer_entry(struct kbase_device *kbdev,
						const u32 *entry,
						unsigned int size,
						bool updatable)
{
	/* The name string starts after the seven u32 configuration fields;
	 * const-qualified cast so we don't discard the caller's const.
	 */
	const char *name = (const char *)&entry[7];
	unsigned int name_len;
	struct firmware_trace_buffer *trace_buffer;
	unsigned int i;

	/* Reject a malformed entry too small to contain a name: without this
	 * check name_len would underflow (unsigned arithmetic).
	 */
	if (size < TRACE_BUFFER_ENTRY_NAME_OFFSET)
		return -EINVAL;

	name_len = size - TRACE_BUFFER_ENTRY_NAME_OFFSET;

	/* Allocate enough space for struct firmware_trace_buffer and the
	 * trace buffer name (with NULL termination).
	 */
	trace_buffer =
		kmalloc(sizeof(*trace_buffer) + name_len + 1, GFP_KERNEL);

	if (!trace_buffer)
		return -ENOMEM;

	memcpy(&trace_buffer->name, name, name_len);
	trace_buffer->name[name_len] = '\0';

	/* Only trace buffers with a known configuration are kept */
	for (i = 0; i < ARRAY_SIZE(trace_buffer_data); i++) {
		if (!strcmp(trace_buffer_data[i].name, trace_buffer->name)) {
			trace_buffer->kbdev = kbdev;
			trace_buffer->updatable = updatable;
			trace_buffer->type = entry[0];
			trace_buffer->gpu_va.size_address = entry[1];
			trace_buffer->gpu_va.insert_address = entry[2];
			trace_buffer->gpu_va.extract_address = entry[3];
			trace_buffer->gpu_va.data_address = entry[4];
			trace_buffer->gpu_va.trace_enable = entry[5];
			trace_buffer->trace_enable_entry_count = entry[6];
			trace_buffer->num_pages = trace_buffer_data[i].size;

			memcpy(trace_buffer->trace_enable_init_mask,
			       trace_buffer_data[i].trace_enable_init_mask,
			       sizeof(trace_buffer->trace_enable_init_mask));
			break;
		}
	}

	if (i < ARRAY_SIZE(trace_buffer_data)) {
		list_add(&trace_buffer->node, &kbdev->csf.firmware_trace_buffers.list);
		dev_dbg(kbdev->dev, "Trace buffer '%s'", trace_buffer->name);
	} else {
		/* Not an error: buffers unknown to this driver version are
		 * simply ignored.
		 */
		dev_dbg(kbdev->dev, "Unknown trace buffer '%s'", trace_buffer->name);
		kfree(trace_buffer);
	}

	return 0;
}
303 
void kbase_csf_firmware_reload_trace_buffers_data(struct kbase_device *kbdev)
{
	struct firmware_trace_buffer *trace_buffer;
	/* Offsets must be recomputed in the same order as in the init path so
	 * each buffer is re-associated with its original Extract/Insert slot.
	 */
	u32 mcu_rw_offset = 0, mcu_write_offset = 0;
	const u32 cache_line_alignment = kbase_get_cache_line_alignment(kbdev);

	list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
		u32 extract_gpu_va, insert_gpu_va, data_buffer_gpu_va,
			trace_enable_size_dwords;
		u32 *extract_cpu_va, *insert_cpu_va;
		unsigned int i;

		/* Rely on the fact that all required mappings already exist */
		extract_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_rw_offset;
		extract_cpu_va = (u32 *)(
			kbdev->csf.firmware_trace_buffers.mcu_rw.cpu_addr +
			mcu_rw_offset);
		insert_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_write_offset;
		insert_cpu_va = (u32 *)(
			kbdev->csf.firmware_trace_buffers.mcu_write.cpu_addr +
			mcu_write_offset);
		data_buffer_gpu_va =
			(trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);

		/* Notice that the function only re-updates firmware memory locations
		 * with information that allows access to the trace buffers without
		 * really resetting their state. For instance, the Insert offset will
		 * not change and, as a consequence, the Extract offset is not going
		 * to be reset to keep consistency.
		 */

		/* Each FW address shall be mapped and set individually, as we can't
		 * assume anything about their location in the memory address space.
		 */
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.data_address, data_buffer_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.insert_address, insert_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.extract_address, extract_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.size_address,
				trace_buffer->num_pages << PAGE_SHIFT);

		/* Round the enable-bit count up to whole 32-bit words */
		trace_enable_size_dwords =
				(trace_buffer->trace_enable_entry_count + 31) >> 5;

		for (i = 0; i < trace_enable_size_dwords; i++) {
			kbase_csf_update_firmware_memory(
					kbdev, trace_buffer->gpu_va.trace_enable + i*4,
					trace_buffer->trace_enable_init_mask[i]);
		}

		/* Store CPU virtual addresses for permanently mapped variables,
		 * as they might have slightly changed.
		 */
		trace_buffer->cpu_va.insert_cpu_va = insert_cpu_va;
		trace_buffer->cpu_va.extract_cpu_va = extract_cpu_va;

		/* Update offsets */
		mcu_write_offset += cache_line_alignment;
		mcu_rw_offset += cache_line_alignment;
	}
}
372 
kbase_csf_firmware_get_trace_buffer(struct kbase_device * kbdev,const char * name)373 struct firmware_trace_buffer *kbase_csf_firmware_get_trace_buffer(
374 	struct kbase_device *kbdev, const char *name)
375 {
376 	struct firmware_trace_buffer *trace_buffer;
377 
378 	list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
379 		if (!strcmp(trace_buffer->name, name))
380 			return trace_buffer;
381 	}
382 
383 	return NULL;
384 }
385 EXPORT_SYMBOL(kbase_csf_firmware_get_trace_buffer);
386 
kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count(const struct firmware_trace_buffer * trace_buffer)387 unsigned int kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count(
388 	const struct firmware_trace_buffer *trace_buffer)
389 {
390 	return trace_buffer->trace_enable_entry_count;
391 }
392 EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count);
393 
static void kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(
	struct firmware_trace_buffer *tb, unsigned int bit, bool value)
{
	struct kbase_device *kbdev = tb->kbdev;
	unsigned int reg_offset;
	u32 bit_mask;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* Out-of-range bits are silently ignored */
	if (bit >= tb->trace_enable_entry_count)
		return;

	reg_offset = bit >> 5;
	bit_mask = 1u << (bit & 0x1F);

	if (value)
		tb->trace_enable_init_mask[reg_offset] |= bit_mask;
	else
		tb->trace_enable_init_mask[reg_offset] &= ~bit_mask;

	/* This is not strictly needed as the caller is supposed to
	 * reload the firmware image (through GPU reset) after updating
	 * the bitmask. Otherwise there is no guarantee that firmware
	 * will take into account the updated bitmask for all types of
	 * trace buffers, since firmware could continue to use the
	 * value of bitmask it cached after the boot.
	 */
	kbase_csf_update_firmware_memory(
		kbdev,
		tb->gpu_va.trace_enable + reg_offset * 4,
		tb->trace_enable_init_mask[reg_offset]);
}
426 
int kbase_csf_firmware_trace_buffer_update_trace_enable_bit(
	struct firmware_trace_buffer *tb, unsigned int bit, bool value)
{
	struct kbase_device *kbdev = tb->kbdev;
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* If trace buffer update cannot be performed with
	 * FIRMWARE_CONFIG_UPDATE then we need to do a
	 * silent reset before we update the memory. If a GPU reset is
	 * already pending, inform the User to retry the update.
	 */
	if (!tb->updatable && kbase_reset_gpu_silent(kbdev)) {
		dev_warn(
			kbdev->dev,
			"GPU reset already in progress when enabling firmware timeline.");
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return -EAGAIN;
	}

	kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(tb, bit, value);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* An updatable buffer just needs the firmware to re-read its config */
	if (tb->updatable)
		err = kbase_csf_trigger_firmware_config_update(kbdev);

	return err;
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_update_trace_enable_bit);
463 
kbase_csf_firmware_trace_buffer_is_empty(const struct firmware_trace_buffer * trace_buffer)464 bool kbase_csf_firmware_trace_buffer_is_empty(
465 	const struct firmware_trace_buffer *trace_buffer)
466 {
467 	return *(trace_buffer->cpu_va.insert_cpu_va) ==
468 			*(trace_buffer->cpu_va.extract_cpu_va);
469 }
470 EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_is_empty);
471 
unsigned int kbase_csf_firmware_trace_buffer_read_data(
	struct firmware_trace_buffer *trace_buffer, u8 *data, unsigned int num_bytes)
{
	unsigned int bytes_copied;
	u8 *data_cpu_va = trace_buffer->data_mapping.cpu_addr;
	/* Snapshot both ring offsets once: firmware advances Insert, the
	 * host only advances Extract (published at the end).
	 */
	u32 extract_offset = *(trace_buffer->cpu_va.extract_cpu_va);
	u32 insert_offset = *(trace_buffer->cpu_va.insert_cpu_va);
	u32 buffer_size = trace_buffer->num_pages << PAGE_SHIFT;

	if (insert_offset >= extract_offset) {
		/* Unread data is contiguous: a single bounded copy suffices */
		bytes_copied = min_t(unsigned int, num_bytes,
			(insert_offset - extract_offset));
		memcpy(data, &data_cpu_va[extract_offset], bytes_copied);
		extract_offset += bytes_copied;
	} else {
		/* Unread data wraps: copy the tail up to the end of the
		 * buffer, then the head from the start of the buffer.
		 */
		unsigned int bytes_copied_head, bytes_copied_tail;

		bytes_copied_tail = min_t(unsigned int, num_bytes,
			(buffer_size - extract_offset));
		memcpy(data, &data_cpu_va[extract_offset], bytes_copied_tail);

		bytes_copied_head = min_t(unsigned int,
			(num_bytes - bytes_copied_tail), insert_offset);
		memcpy(&data[bytes_copied_tail], data_cpu_va, bytes_copied_head);

		bytes_copied = bytes_copied_head + bytes_copied_tail;
		extract_offset += bytes_copied;
		/* Wrap Extract back into the buffer if it ran past the end */
		if (extract_offset >= buffer_size)
			extract_offset = bytes_copied_head;
	}

	/* Publish the new Extract offset so firmware can reuse the space */
	*(trace_buffer->cpu_va.extract_cpu_va) = extract_offset;

	return bytes_copied;
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_read_data);
508 
static void update_trace_buffer_active_mask64(struct firmware_trace_buffer *tb, u64 mask)
{
	/* A 64-bit mask can only describe the first 64 enable bits. Clamp the
	 * loop bound so that (mask >> i) never shifts by the operand width or
	 * more, which is undefined behavior in C; any enable bits beyond 64
	 * are left untouched.
	 */
	const unsigned int bit_count =
		tb->trace_enable_entry_count < 64 ? tb->trace_enable_entry_count : 64;
	unsigned int i;

	for (i = 0; i < bit_count; i++)
		kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(tb, i, (mask >> i) & 1);
}
516 
517 #define U32_BITS 32
kbase_csf_firmware_trace_buffer_get_active_mask64(struct firmware_trace_buffer * tb)518 u64 kbase_csf_firmware_trace_buffer_get_active_mask64(struct firmware_trace_buffer *tb)
519 {
520 	u64 active_mask = tb->trace_enable_init_mask[0];
521 
522 	if (tb->trace_enable_entry_count > U32_BITS)
523 		active_mask |= (u64)tb->trace_enable_init_mask[1] << U32_BITS;
524 
525 	return active_mask;
526 }
527 
int kbase_csf_firmware_trace_buffer_set_active_mask64(struct firmware_trace_buffer *tb, u64 mask)
{
	struct kbase_device *kbdev = tb->kbdev;
	unsigned long flags;
	int err = 0;

	if (!tb->updatable) {
		/* A non-updatable buffer needs a silent GPU reset for the new
		 * mask to take effect; if a reset is already pending, the
		 * caller must retry.
		 */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		if (kbase_reset_gpu_silent(kbdev))
			err = -EAGAIN;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		if (err)
			return err;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	update_trace_buffer_active_mask64(tb, mask);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* if we can update the config we need to just trigger
	 * FIRMWARE_CONFIG_UPDATE.
	 */
	if (tb->updatable)
		err = kbase_csf_trigger_firmware_config_update(kbdev);

	return err;
}
556