xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/mali_kbase_hwcnt_context.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3  *
4  * (C) COPYRIGHT 2018, 2020-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 /*
23  * Hardware counter context API.
24  */
25 
26 #ifndef _KBASE_HWCNT_CONTEXT_H_
27 #define _KBASE_HWCNT_CONTEXT_H_
28 
29 #include <linux/types.h>
30 #include <linux/workqueue.h>
31 
32 struct kbase_hwcnt_backend_interface;
33 struct kbase_hwcnt_context;
34 
/**
 * kbase_hwcnt_context_init() - Initialise a hardware counter context.
 * @iface:    Non-NULL pointer to a hardware counter backend interface.
 * @out_hctx: Non-NULL pointer to where the pointer to the created context will
 *            be stored on success.
 *
 * On creation, the disable count of the context will be 0.
 * A hardware counter accumulator can be acquired using a created context.
 *
 * NOTE(review): @iface is presumably retained by pointer and must outlive the
 * context (i.e. remain valid until kbase_hwcnt_context_term()) - confirm
 * against the implementation.
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_context_init(const struct kbase_hwcnt_backend_interface *iface,
			     struct kbase_hwcnt_context **out_hctx);
48 
/**
 * kbase_hwcnt_context_term() - Terminate a hardware counter context.
 * @hctx: Pointer to context to be terminated.
 *
 * All work queued on the context via kbase_hwcnt_context_queue_work() must
 * have completed before termination (see that function's documentation).
 *
 * NOTE(review): unlike every other function in this header, @hctx is not
 * documented as Non-NULL here - presumably a NULL @hctx is tolerated as a
 * no-op, but confirm with the implementation before relying on it.
 */
void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx);
54 
/**
 * kbase_hwcnt_context_metadata() - Get the hardware counter metadata used by
 *                                  the context, so related counter data
 *                                  structures can be created.
 * @hctx: Non-NULL pointer to the hardware counter context.
 *
 * NOTE(review): the returned metadata is presumably owned by the context and
 * only valid until kbase_hwcnt_context_term(@hctx) - confirm against the
 * implementation.
 *
 * Return: Non-NULL pointer to metadata, or NULL on error.
 */
const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(struct kbase_hwcnt_context *hctx);
64 
/**
 * kbase_hwcnt_context_disable() - Increment the disable count of the context.
 * @hctx: Non-NULL pointer to the hardware counter context.
 *
 * If a call to this function increments the disable count from 0 to 1, and
 * an accumulator has been acquired, then a counter dump will be performed
 * before counters are disabled via the backend interface.
 *
 * Subsequent dumps via the accumulator while counters are disabled will first
 * return the accumulated dump, then will return dumps with zeroed counters.
 *
 * After this function call returns, it is guaranteed that counters will not be
 * enabled via the backend interface.
 *
 * Each call must eventually be balanced by exactly one call to
 * kbase_hwcnt_context_enable() (see that function's documentation).
 */
void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx);
80 
/**
 * kbase_hwcnt_context_disable_atomic() - Increment the disable count of the
 *                                        context if possible in an atomic
 *                                        context.
 * @hctx: Non-NULL pointer to the hardware counter context.
 *
 * This function will only succeed if hardware counters are effectively already
 * disabled, i.e. there is no accumulator, the disable count is already
 * non-zero, or the accumulator has no counters set.
 *
 * After this function call returns true, it is guaranteed that counters will
 * not be enabled via the backend interface.
 *
 * NOTE(review): a successful call presumably must be balanced by a call to
 * kbase_hwcnt_context_enable(), as with kbase_hwcnt_context_disable() -
 * confirm, as enable's documentation only mentions the non-atomic variant.
 *
 * Return: True if the disable count was incremented, else False.
 */
bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx);
97 
/**
 * kbase_hwcnt_context_enable() - Decrement the disable count of the context.
 * @hctx: Non-NULL pointer to the hardware counter context.
 *
 * If a call to this function decrements the disable count from 1 to 0, and
 * an accumulator has been acquired, then counters will be re-enabled via the
 * backend interface.
 *
 * If an accumulator has been acquired and enabling counters fails for some
 * reason, the accumulator will be placed into an error state.
 *
 * It is only valid to call this function once for each prior call to
 * kbase_hwcnt_context_disable() that has returned; calls must not outnumber
 * the disables.
 *
 * The spinlock documented in the backend interface that was passed in to
 * kbase_hwcnt_context_init() must be held before calling this function.
 */
void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx);
116 
/**
 * kbase_hwcnt_context_queue_work() - Queue hardware counter related async
 *                                    work on a workqueue specialized for
 *                                    hardware counters.
 * @hctx: Non-NULL pointer to the hardware counter context.
 * @work: Non-NULL pointer to work to queue.
 *
 * Performance counter related work is high priority, short running, and
 * generally CPU locality is unimportant. There is no standard workqueue that
 * can service this flavor of work.
 *
 * Rather than have each user of counters define their own workqueue, we have
 * a centralized one in here that anybody using this hardware counter API
 * should use.
 *
 * Before the context is destroyed, all work submitted must have been completed.
 * Given that the work enqueued via this function is likely to be hardware
 * counter related and will therefore use the context object, this is likely
 * to be behavior that will occur naturally.
 *
 * Historical note: prior to this centralized workqueue, the system_highpri_wq
 * was used. This was generally fine, except when a particularly long running,
 * higher priority thread ended up scheduled on the enqueuing CPU core. Given
 * that hardware counters requires tight integration with power management,
 * this meant progress through the power management states could be stalled
 * for however long that higher priority thread took.
 *
 * Return: false if work was already on a queue, true otherwise.
 */
bool kbase_hwcnt_context_queue_work(struct kbase_hwcnt_context *hctx, struct work_struct *work);
147 
148 #endif /* _KBASE_HWCNT_CONTEXT_H_ */
149