xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/mali_kbase_hwcnt.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Implementation of hardware counter context and accumulator APIs.
 */

#include "hwcnt/mali_kbase_hwcnt_context.h"
#include "hwcnt/mali_kbase_hwcnt_accumulator.h"
#include "hwcnt/backend/mali_kbase_hwcnt_backend.h"
#include "hwcnt/mali_kbase_hwcnt_types.h"

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/**
 * enum kbase_hwcnt_accum_state - Hardware counter accumulator states.
 * @ACCUM_STATE_ERROR:    Error state, where all accumulator operations fail.
 * @ACCUM_STATE_DISABLED: Disabled state, where dumping is always disabled.
 * @ACCUM_STATE_ENABLED:  Enabled state, where dumping is enabled if there are
 *                        any enabled counters.
 */
enum kbase_hwcnt_accum_state { ACCUM_STATE_ERROR, ACCUM_STATE_DISABLED, ACCUM_STATE_ENABLED };

/**
 * struct kbase_hwcnt_accumulator - Hardware counter accumulator structure.
 * @metadata:               Pointer to immutable hwcnt metadata.
 * @backend:                Pointer to created counter backend.
 * @state:                  The current state of the accumulator.
 *                           - State transition from disabled->enabled or
 *                             disabled->error requires state_lock.
 *                           - State transition from enabled->disabled or
 *                             enabled->error requires both accum_lock and
 *                             state_lock.
 *                           - Error state persists until next disable.
 * @enable_map:             The current set of enabled counters.
 *                           - Must only be modified while holding both
 *                             accum_lock and state_lock.
 *                           - Can be read while holding either lock.
 *                           - Must stay in sync with enable_map_any_enabled.
 * @enable_map_any_enabled: True if any counters in the map are enabled, else
 *                          false. If true, and state is ACCUM_STATE_ENABLED,
 *                          then the counter backend will be enabled.
 *                           - Must only be modified while holding both
 *                             accum_lock and state_lock.
 *                           - Can be read while holding either lock.
 *                           - Must stay in sync with enable_map.
 * @scratch_map:            Scratch enable map, used as temporary enable map
 *                          storage during dumps.
 *                           - Must only be read or modified while holding
 *                             accum_lock.
 * @accum_buf:              Accumulation buffer, where dumps will be accumulated
 *                          into on transition to a disabled state.
 *                           - Must only be read or modified while holding
 *                             accum_lock.
 * @accumulated:            True if the accumulation buffer has been accumulated
 *                          into and not subsequently read from yet, else false.
 *                           - Must only be read or modified while holding
 *                             accum_lock.
 * @ts_last_dump_ns:        Timestamp (ns) of the end time of the most recent
 *                          dump that was requested by the user.
 *                           - Must only be read or modified while holding
 *                             accum_lock.
 */
struct kbase_hwcnt_accumulator {
	const struct kbase_hwcnt_metadata *metadata;
	struct kbase_hwcnt_backend *backend;
	enum kbase_hwcnt_accum_state state;
	struct kbase_hwcnt_enable_map enable_map;
	bool enable_map_any_enabled;
	struct kbase_hwcnt_enable_map scratch_map;
	struct kbase_hwcnt_dump_buffer accum_buf;
	bool accumulated;
	u64 ts_last_dump_ns;
};

/**
 * struct kbase_hwcnt_context - Hardware counter context structure.
 * @iface:         Pointer to hardware counter backend interface.
 * @state_lock:    Spinlock protecting state.
 * @disable_count: Disable count of the context. Initialised to 1.
 *                 Decremented when the accumulator is acquired, and incremented
 *                 on release. Incremented on calls to
 *                 kbase_hwcnt_context_disable[_atomic], and decremented on
 *                 calls to kbase_hwcnt_context_enable.
 *                  - Must only be read or modified while holding state_lock.
 * @accum_lock:    Mutex protecting accumulator.
 * @accum_inited:  Flag to prevent concurrent accumulator initialisation and/or
 *                 termination. Set to true before accumulator initialisation,
 *                 and false after accumulator termination.
 *                  - Must only be modified while holding both accum_lock and
 *                    state_lock.
 *                  - Can be read while holding either lock.
 * @accum:         Hardware counter accumulator structure.
 * @wq:            Centralized workqueue for users of hardware counters to
 *                 submit async hardware counter related work. Never called
 *                 into directly, but it's expected that many of the functions
 *                 in this API will end up being called from the enqueued async
 *                 work.
 */
struct kbase_hwcnt_context {
	const struct kbase_hwcnt_backend_interface *iface;
	spinlock_t state_lock;
	size_t disable_count;
	struct mutex accum_lock;
	bool accum_inited;
	struct kbase_hwcnt_accumulator accum;
	struct workqueue_struct *wq;
};

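/*
 * Lock ordering note (derived from the call sites below, not stated in the
 * original documentation): whenever both locks are needed, accum_lock (a
 * mutex, which may sleep) is always taken before state_lock (an IRQ-safe
 * spinlock). state_lock may also be taken on its own, e.g. by
 * kbase_hwcnt_context_disable_atomic() in atomic context.
 */
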
int kbase_hwcnt_context_init(const struct kbase_hwcnt_backend_interface *iface,
			     struct kbase_hwcnt_context **out_hctx)
{
	struct kbase_hwcnt_context *hctx = NULL;

	if (!iface || !out_hctx)
		return -EINVAL;

	hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
	if (!hctx)
		goto err_alloc_hctx;

	hctx->iface = iface;
	spin_lock_init(&hctx->state_lock);
	hctx->disable_count = 1;
	mutex_init(&hctx->accum_lock);
	hctx->accum_inited = false;

	hctx->wq = alloc_workqueue("mali_kbase_hwcnt", WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!hctx->wq)
		goto err_alloc_workqueue;

	*out_hctx = hctx;

	return 0;

err_alloc_workqueue:
	kfree(hctx);
err_alloc_hctx:
	return -ENOMEM;
}

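/*
 * Example lifecycle (illustrative sketch only, not part of this driver): a
 * hypothetical caller creating a context around a backend interface `iface`
 * (assumed valid), acquiring the accumulator, performing a dump that is
 * discarded, then tearing everything down.
 *
 *	struct kbase_hwcnt_context *hctx;
 *	struct kbase_hwcnt_accumulator *accum;
 *	u64 ts_start_ns, ts_end_ns;
 *	int err;
 *
 *	err = kbase_hwcnt_context_init(iface, &hctx);
 *	if (err)
 *		return err;
 *
 *	err = kbase_hwcnt_accumulator_acquire(hctx, &accum);
 *	if (!err) {
 *		// NULL dump buffer: counters are dumped and discarded.
 *		err = kbase_hwcnt_accumulator_dump(accum, &ts_start_ns, &ts_end_ns, NULL);
 *		kbase_hwcnt_accumulator_release(accum);
 *	}
 *
 *	kbase_hwcnt_context_term(hctx);
 */
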
void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx)
{
	if (!hctx)
		return;

	/* Make sure we didn't leak the accumulator */
	WARN_ON(hctx->accum_inited);

	/* We don't expect any work to be pending on this workqueue.
	 * Regardless, this will safely drain and complete the work.
	 */
	destroy_workqueue(hctx->wq);
	kfree(hctx);
}

/**
 * kbasep_hwcnt_accumulator_term() - Terminate the accumulator for the context.
 * @hctx: Non-NULL pointer to hardware counter context.
 */
static void kbasep_hwcnt_accumulator_term(struct kbase_hwcnt_context *hctx)
{
	WARN_ON(!hctx);
	WARN_ON(!hctx->accum_inited);

	kbase_hwcnt_enable_map_free(&hctx->accum.scratch_map);
	kbase_hwcnt_dump_buffer_free(&hctx->accum.accum_buf);
	kbase_hwcnt_enable_map_free(&hctx->accum.enable_map);
	hctx->iface->term(hctx->accum.backend);
	memset(&hctx->accum, 0, sizeof(hctx->accum));
}

/**
 * kbasep_hwcnt_accumulator_init() - Initialise the accumulator for the context.
 * @hctx: Non-NULL pointer to hardware counter context.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_accumulator_init(struct kbase_hwcnt_context *hctx)
{
	int errcode;

	WARN_ON(!hctx);
	WARN_ON(!hctx->accum_inited);

	errcode = hctx->iface->init(hctx->iface->info, &hctx->accum.backend);
	if (errcode)
		goto error;

	hctx->accum.metadata = hctx->iface->metadata(hctx->iface->info);
	hctx->accum.state = ACCUM_STATE_ERROR;

	errcode = kbase_hwcnt_enable_map_alloc(hctx->accum.metadata, &hctx->accum.enable_map);
	if (errcode)
		goto error;

	hctx->accum.enable_map_any_enabled = false;

	errcode = kbase_hwcnt_dump_buffer_alloc(hctx->accum.metadata, &hctx->accum.accum_buf);
	if (errcode)
		goto error;

	errcode = kbase_hwcnt_enable_map_alloc(hctx->accum.metadata, &hctx->accum.scratch_map);
	if (errcode)
		goto error;

	hctx->accum.accumulated = false;

	hctx->accum.ts_last_dump_ns = hctx->iface->timestamp_ns(hctx->accum.backend);

	return 0;

error:
	kbasep_hwcnt_accumulator_term(hctx);
	return errcode;
}

/**
 * kbasep_hwcnt_accumulator_disable() - Transition the accumulator into the
 *                                      disabled state, from the enabled or
 *                                      error states.
 * @hctx:       Non-NULL pointer to hardware counter context.
 * @accumulate: True if we should accumulate before disabling, else false.
 */
static void kbasep_hwcnt_accumulator_disable(struct kbase_hwcnt_context *hctx, bool accumulate)
{
	int errcode = 0;
	bool backend_enabled = false;
	struct kbase_hwcnt_accumulator *accum;
	unsigned long flags;
	u64 dump_time_ns;

	WARN_ON(!hctx);
	lockdep_assert_held(&hctx->accum_lock);
	WARN_ON(!hctx->accum_inited);

	accum = &hctx->accum;

	spin_lock_irqsave(&hctx->state_lock, flags);

	WARN_ON(hctx->disable_count != 0);
	WARN_ON(hctx->accum.state == ACCUM_STATE_DISABLED);

	if ((hctx->accum.state == ACCUM_STATE_ENABLED) && (accum->enable_map_any_enabled))
		backend_enabled = true;

	if (!backend_enabled)
		hctx->accum.state = ACCUM_STATE_DISABLED;

	spin_unlock_irqrestore(&hctx->state_lock, flags);

	/* Early out if the backend is not already enabled */
	if (!backend_enabled)
		return;

	if (!accumulate)
		goto disable;

	/* Try and accumulate before disabling */
	errcode = hctx->iface->dump_request(accum->backend, &dump_time_ns);
	if (errcode)
		goto disable;

	errcode = hctx->iface->dump_wait(accum->backend);
	if (errcode)
		goto disable;

	errcode = hctx->iface->dump_get(accum->backend, &accum->accum_buf, &accum->enable_map,
					accum->accumulated);
	if (errcode)
		goto disable;

	accum->accumulated = true;

disable:
	hctx->iface->dump_disable(accum->backend);

	/* Regardless of any errors during the accumulate, put the accumulator
	 * in the disabled state.
	 */
	spin_lock_irqsave(&hctx->state_lock, flags);

	hctx->accum.state = ACCUM_STATE_DISABLED;

	spin_unlock_irqrestore(&hctx->state_lock, flags);
}

/**
 * kbasep_hwcnt_accumulator_enable() - Transition the accumulator into the
 *                                     enabled state, from the disabled state.
 * @hctx: Non-NULL pointer to hardware counter context.
 */
static void kbasep_hwcnt_accumulator_enable(struct kbase_hwcnt_context *hctx)
{
	int errcode = 0;
	struct kbase_hwcnt_accumulator *accum;

	WARN_ON(!hctx);
	lockdep_assert_held(&hctx->state_lock);
	WARN_ON(!hctx->accum_inited);
	WARN_ON(hctx->accum.state != ACCUM_STATE_DISABLED);

	accum = &hctx->accum;

	/* The backend only needs enabling if any counters are enabled */
	if (accum->enable_map_any_enabled)
		errcode = hctx->iface->dump_enable_nolock(accum->backend, &accum->enable_map);

	if (!errcode)
		accum->state = ACCUM_STATE_ENABLED;
	else
		accum->state = ACCUM_STATE_ERROR;
}

/**
 * kbasep_hwcnt_accumulator_dump() - Perform a dump with the most up-to-date
 *                                   values of enabled counters possible, and
 *                                   optionally update the set of enabled
 *                                   counters.
 * @hctx:        Non-NULL pointer to the hardware counter context
 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
 *               be written out to on success
 * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
 *               be written out to on success
 * @dump_buf:    Pointer to the buffer where the dump will be written out to on
 *               success. If non-NULL, must have the same metadata as the
 *               accumulator. If NULL, the dump will be discarded.
 * @new_map:     Pointer to the new counter enable map. If non-NULL, must have
 *               the same metadata as the accumulator. If NULL, the set of
 *               enabled counters will be unchanged.
 *
 * Return:       0 on success, else error code.
 */
static int kbasep_hwcnt_accumulator_dump(struct kbase_hwcnt_context *hctx, u64 *ts_start_ns,
					 u64 *ts_end_ns, struct kbase_hwcnt_dump_buffer *dump_buf,
					 const struct kbase_hwcnt_enable_map *new_map)
{
	int errcode = 0;
	unsigned long flags;
	enum kbase_hwcnt_accum_state state;
	bool dump_requested = false;
	bool dump_written = false;
	bool cur_map_any_enabled;
	struct kbase_hwcnt_enable_map *cur_map;
	bool new_map_any_enabled = false;
	u64 dump_time_ns = 0;
	struct kbase_hwcnt_accumulator *accum;

	WARN_ON(!hctx);
	WARN_ON(!ts_start_ns);
	WARN_ON(!ts_end_ns);
	WARN_ON(dump_buf && (dump_buf->metadata != hctx->accum.metadata));
	WARN_ON(new_map && (new_map->metadata != hctx->accum.metadata));
	WARN_ON(!hctx->accum_inited);
	lockdep_assert_held(&hctx->accum_lock);

	accum = &hctx->accum;
	cur_map = &accum->scratch_map;

	/* Save out info about the current enable map */
	cur_map_any_enabled = accum->enable_map_any_enabled;
	kbase_hwcnt_enable_map_copy(cur_map, &accum->enable_map);

	if (new_map)
		new_map_any_enabled = kbase_hwcnt_enable_map_any_enabled(new_map);

	/*
	 * We're holding accum_lock, so the accumulator state might transition
	 * from disabled to enabled during this function (as enabling is lock
	 * free), but it will never disable (as disabling needs to hold the
	 * accum_lock), nor will it ever transition from enabled to error (as
	 * an enable while we're already enabled is impossible).
	 *
	 * If we're already disabled, we'll only look at the accumulation buffer
	 * rather than do a real dump, so a concurrent enable does not affect
	 * us.
	 *
	 * If a concurrent enable fails, we might transition to the error
	 * state, but again, as we're only looking at the accumulation buffer,
	 * it's not an issue.
	 */
	spin_lock_irqsave(&hctx->state_lock, flags);

	state = accum->state;

	/*
	 * Update the new map now, such that if an enable occurs during this
	 * dump then that enable will set the new map. If we're already enabled,
	 * then we'll do it ourselves after the dump.
	 */
	if (new_map) {
		kbase_hwcnt_enable_map_copy(&accum->enable_map, new_map);
		accum->enable_map_any_enabled = new_map_any_enabled;
	}

	spin_unlock_irqrestore(&hctx->state_lock, flags);

	/* Error state, so early out. No need to roll back any map updates */
	if (state == ACCUM_STATE_ERROR)
		return -EIO;

	/* Initiate the dump if the backend is enabled. */
	if ((state == ACCUM_STATE_ENABLED) && cur_map_any_enabled) {
		if (dump_buf) {
			errcode = hctx->iface->dump_request(accum->backend, &dump_time_ns);
			dump_requested = true;
		} else {
			dump_time_ns = hctx->iface->timestamp_ns(accum->backend);
			errcode = hctx->iface->dump_clear(accum->backend);
		}

		if (errcode)
			goto error;
	} else {
		dump_time_ns = hctx->iface->timestamp_ns(accum->backend);
	}

	/* Copy any accumulation into the dest buffer */
	if (accum->accumulated && dump_buf) {
		kbase_hwcnt_dump_buffer_copy(dump_buf, &accum->accum_buf, cur_map);
		dump_written = true;
	}

	/* Wait for any requested dumps to complete */
	if (dump_requested) {
		WARN_ON(state != ACCUM_STATE_ENABLED);
		errcode = hctx->iface->dump_wait(accum->backend);
		if (errcode)
			goto error;
	}
	/* If we're enabled and there's a new enable map, change the enabled set
	 * as soon as possible after the dump has completed.
	 */
	if ((state == ACCUM_STATE_ENABLED) && new_map) {
		/* Backend is only enabled if there were any enabled counters */
		if (cur_map_any_enabled)
			hctx->iface->dump_disable(accum->backend);

		/* (Re-)enable the backend if the new map has enabled counters.
		 * No need to acquire the spinlock, as concurrent enable while
		 * we're already enabled and holding accum_lock is impossible.
		 */
		if (new_map_any_enabled) {
			errcode = hctx->iface->dump_enable(accum->backend, new_map);
			if (errcode)
				goto error;
		}
	}

	/* Copy, accumulate, or zero into the dest buffer to finish */
	if (dump_buf) {
		/* If we dumped, copy or accumulate it into the destination */
		if (dump_requested) {
			WARN_ON(state != ACCUM_STATE_ENABLED);
			errcode = hctx->iface->dump_get(accum->backend, dump_buf, cur_map,
							dump_written);
			if (errcode)
				goto error;
			dump_written = true;
		}

		/* If we've not written anything into the dump buffer so far, it
		 * means there was nothing to write. Zero any enabled counters.
		 */
		if (!dump_written)
			kbase_hwcnt_dump_buffer_zero(dump_buf, cur_map);
	}

	/* Write out timestamps */
	*ts_start_ns = accum->ts_last_dump_ns;
	*ts_end_ns = dump_time_ns;

	accum->accumulated = false;
	accum->ts_last_dump_ns = dump_time_ns;

	return 0;
error:
	/* An error was only physically possible if the backend was enabled */
	WARN_ON(state != ACCUM_STATE_ENABLED);

	/* Disable the backend, and transition to the error state */
	hctx->iface->dump_disable(accum->backend);
	spin_lock_irqsave(&hctx->state_lock, flags);

	accum->state = ACCUM_STATE_ERROR;

	spin_unlock_irqrestore(&hctx->state_lock, flags);

	return errcode;
}

/**
 * kbasep_hwcnt_context_disable() - Increment the disable count of the context.
 * @hctx:       Non-NULL pointer to hardware counter context.
 * @accumulate: True if we should accumulate before disabling, else false.
 */
static void kbasep_hwcnt_context_disable(struct kbase_hwcnt_context *hctx, bool accumulate)
{
	unsigned long flags;

	WARN_ON(!hctx);
	lockdep_assert_held(&hctx->accum_lock);

	if (!kbase_hwcnt_context_disable_atomic(hctx)) {
		kbasep_hwcnt_accumulator_disable(hctx, accumulate);

		spin_lock_irqsave(&hctx->state_lock, flags);

		/* Atomic disable failed and we're holding the mutex, so current
		 * disable count must be 0.
		 */
		WARN_ON(hctx->disable_count != 0);
		hctx->disable_count++;

		spin_unlock_irqrestore(&hctx->state_lock, flags);
	}
}

int kbase_hwcnt_accumulator_acquire(struct kbase_hwcnt_context *hctx,
				    struct kbase_hwcnt_accumulator **accum)
{
	int errcode = 0;
	unsigned long flags;

	if (!hctx || !accum)
		return -EINVAL;

	mutex_lock(&hctx->accum_lock);
	spin_lock_irqsave(&hctx->state_lock, flags);

	if (!hctx->accum_inited)
		/* Set accum initing now to prevent concurrent init */
		hctx->accum_inited = true;
	else
		/* Already have an accum, or already being inited */
		errcode = -EBUSY;

	spin_unlock_irqrestore(&hctx->state_lock, flags);
	mutex_unlock(&hctx->accum_lock);

	if (errcode)
		return errcode;

	errcode = kbasep_hwcnt_accumulator_init(hctx);

	if (errcode) {
		mutex_lock(&hctx->accum_lock);
		spin_lock_irqsave(&hctx->state_lock, flags);

		hctx->accum_inited = false;

		spin_unlock_irqrestore(&hctx->state_lock, flags);
		mutex_unlock(&hctx->accum_lock);

		return errcode;
	}

	spin_lock_irqsave(&hctx->state_lock, flags);

	WARN_ON(hctx->disable_count == 0);
	WARN_ON(hctx->accum.enable_map_any_enabled);

	/* Decrement the disable count to allow the accumulator to be accessible
	 * now that it's fully constructed.
	 */
	hctx->disable_count--;

	/*
	 * Make sure the accumulator is initialised to the correct state.
	 * Regardless of initial state, counters don't need to be enabled via
	 * the backend, as the initial enable map has no enabled counters.
	 */
	hctx->accum.state = (hctx->disable_count == 0) ? ACCUM_STATE_ENABLED : ACCUM_STATE_DISABLED;

	spin_unlock_irqrestore(&hctx->state_lock, flags);

	*accum = &hctx->accum;

	return 0;
}

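/*
 * Example (illustrative sketch only; error handling trimmed): a hypothetical
 * user acquiring the accumulator, handling -EBUSY if another user already
 * owns it, and releasing it once finished.
 *
 *	struct kbase_hwcnt_accumulator *accum;
 *	int err = kbase_hwcnt_accumulator_acquire(hctx, &accum);
 *
 *	if (err == -EBUSY) {
 *		// Some other user currently owns the accumulator.
 *		return err;
 *	} else if (err) {
 *		return err;
 *	}
 *
 *	// ... use kbase_hwcnt_accumulator_dump()/_set_counters() here ...
 *
 *	kbase_hwcnt_accumulator_release(accum);
 */
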
void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum)
{
	unsigned long flags;
	struct kbase_hwcnt_context *hctx;

	if (!accum)
		return;

	hctx = container_of(accum, struct kbase_hwcnt_context, accum);

	mutex_lock(&hctx->accum_lock);

	/* Double release is a programming error */
	WARN_ON(!hctx->accum_inited);

	/* Disable the context to ensure the accumulator is inaccessible while
	 * we're destroying it. This performs the disable count increment that
	 * corresponds to the decrement done during acquisition.
	 */
	kbasep_hwcnt_context_disable(hctx, false);

	mutex_unlock(&hctx->accum_lock);

	kbasep_hwcnt_accumulator_term(hctx);

	mutex_lock(&hctx->accum_lock);
	spin_lock_irqsave(&hctx->state_lock, flags);

	hctx->accum_inited = false;

	spin_unlock_irqrestore(&hctx->state_lock, flags);
	mutex_unlock(&hctx->accum_lock);
}

void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx)
{
	if (WARN_ON(!hctx))
		return;

	/* Try and atomically disable first, so we can avoid locking the mutex
	 * if we don't need to.
	 */
	if (kbase_hwcnt_context_disable_atomic(hctx))
		return;

	mutex_lock(&hctx->accum_lock);

	kbasep_hwcnt_context_disable(hctx, true);

	mutex_unlock(&hctx->accum_lock);
}

bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx)
{
	unsigned long flags;
	bool atomic_disabled = false;

	if (WARN_ON(!hctx))
		return false;

	spin_lock_irqsave(&hctx->state_lock, flags);

	if (!WARN_ON(hctx->disable_count == SIZE_MAX)) {
		/*
		 * If disable count is non-zero, we can just bump the disable
		 * count.
		 *
		 * Otherwise, we can't disable in an atomic context.
		 */
		if (hctx->disable_count != 0) {
			hctx->disable_count++;
			atomic_disabled = true;
		}
	}

	spin_unlock_irqrestore(&hctx->state_lock, flags);

	return atomic_disabled;
}

void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx)
{
	unsigned long flags;

	if (WARN_ON(!hctx))
		return;

	spin_lock_irqsave(&hctx->state_lock, flags);

	if (!WARN_ON(hctx->disable_count == 0)) {
		if (hctx->disable_count == 1)
			kbasep_hwcnt_accumulator_enable(hctx);

		hctx->disable_count--;
	}

	spin_unlock_irqrestore(&hctx->state_lock, flags);
}

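/*
 * Example (illustrative sketch only): disable/enable calls must be balanced.
 * A hypothetical caller quiescing counters around some operation that must
 * not be observed, accumulating any pending values first:
 *
 *	kbase_hwcnt_context_disable(hctx);
 *
 *	// ... operation during which dumping must stay disabled ...
 *
 *	kbase_hwcnt_context_enable(hctx);
 *
 * In atomic context, kbase_hwcnt_context_disable_atomic(hctx) may be used
 * instead, but it only succeeds (returns true) if the disable count is
 * already non-zero, so the caller must check the return value.
 */
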
const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(struct kbase_hwcnt_context *hctx)
{
	if (!hctx)
		return NULL;

	return hctx->iface->metadata(hctx->iface->info);
}

bool kbase_hwcnt_context_queue_work(struct kbase_hwcnt_context *hctx, struct work_struct *work)
{
	if (WARN_ON(!hctx) || WARN_ON(!work))
		return false;

	return queue_work(hctx->wq, work);
}

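/*
 * Example (illustrative sketch only; my_dump_worker is a hypothetical work
 * function defined by the caller): submitting asynchronous counter work to
 * the context's centralized workqueue.
 *
 *	static void my_dump_worker(struct work_struct *work)
 *	{
 *		// Typically calls back into this API, e.g. to perform a dump.
 *	}
 *
 *	static DECLARE_WORK(my_work, my_dump_worker);
 *
 *	if (!kbase_hwcnt_context_queue_work(hctx, &my_work))
 *		pr_debug("work was already queued\n");
 *
 * As with queue_work(), the return value is false if the work item was
 * already pending on a workqueue.
 */
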
int kbase_hwcnt_accumulator_set_counters(struct kbase_hwcnt_accumulator *accum,
					 const struct kbase_hwcnt_enable_map *new_map,
					 u64 *ts_start_ns, u64 *ts_end_ns,
					 struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_context *hctx;

	if (!accum || !new_map || !ts_start_ns || !ts_end_ns)
		return -EINVAL;

	hctx = container_of(accum, struct kbase_hwcnt_context, accum);

	if ((new_map->metadata != hctx->accum.metadata) ||
	    (dump_buf && (dump_buf->metadata != hctx->accum.metadata)))
		return -EINVAL;

	mutex_lock(&hctx->accum_lock);

	errcode = kbasep_hwcnt_accumulator_dump(hctx, ts_start_ns, ts_end_ns, dump_buf, new_map);

	mutex_unlock(&hctx->accum_lock);

	return errcode;
}

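/*
 * Example (illustrative sketch only; how the enable map bits are populated is
 * backend specific and omitted here): switching the enabled counter set while
 * discarding the final values collected under the old set.
 *
 *	struct kbase_hwcnt_enable_map new_map;
 *	u64 ts_start_ns, ts_end_ns;
 *	int err;
 *
 *	err = kbase_hwcnt_enable_map_alloc(kbase_hwcnt_context_metadata(hctx), &new_map);
 *	if (err)
 *		return err;
 *
 *	// ... set the desired enable bits in new_map ...
 *
 *	err = kbase_hwcnt_accumulator_set_counters(accum, &new_map, &ts_start_ns,
 *						   &ts_end_ns, NULL);
 *
 *	kbase_hwcnt_enable_map_free(&new_map);
 */
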
int kbase_hwcnt_accumulator_dump(struct kbase_hwcnt_accumulator *accum, u64 *ts_start_ns,
				 u64 *ts_end_ns, struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_context *hctx;

	if (!accum || !ts_start_ns || !ts_end_ns)
		return -EINVAL;

	hctx = container_of(accum, struct kbase_hwcnt_context, accum);

	if (dump_buf && (dump_buf->metadata != hctx->accum.metadata))
		return -EINVAL;

	mutex_lock(&hctx->accum_lock);

	errcode = kbasep_hwcnt_accumulator_dump(hctx, ts_start_ns, ts_end_ns, dump_buf, NULL);

	mutex_unlock(&hctx->accum_lock);

	return errcode;
}

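/*
 * Example (illustrative sketch only): a dump into a caller-owned dump buffer
 * allocated against the context's metadata, so the metadata check above
 * passes. On success, ts_start_ns/ts_end_ns bound the dumped interval.
 *
 *	struct kbase_hwcnt_dump_buffer dump_buf;
 *	u64 ts_start_ns, ts_end_ns;
 *	int err;
 *
 *	err = kbase_hwcnt_dump_buffer_alloc(kbase_hwcnt_context_metadata(hctx), &dump_buf);
 *	if (err)
 *		return err;
 *
 *	err = kbase_hwcnt_accumulator_dump(accum, &ts_start_ns, &ts_end_ns, &dump_buf);
 *	if (!err) {
 *		// Enabled counters in dump_buf now hold the values accumulated
 *		// since the previous dump.
 *	}
 *
 *	kbase_hwcnt_dump_buffer_free(&dump_buf);
 */
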
u64 kbase_hwcnt_accumulator_timestamp_ns(struct kbase_hwcnt_accumulator *accum)
{
	struct kbase_hwcnt_context *hctx;

	if (WARN_ON(!accum))
		return 0;

	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
	return hctx->iface->timestamp_ns(accum->backend);
}
776