/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_HWCNT_GPU_NARROW_H_
#define _KBASE_HWCNT_GPU_NARROW_H_

#include "hwcnt/mali_kbase_hwcnt_types.h"
#include <linux/types.h>

struct kbase_device;
struct kbase_hwcnt_metadata;
struct kbase_hwcnt_enable_map;
struct kbase_hwcnt_dump_buffer;

/**
 * struct kbase_hwcnt_metadata_narrow - Narrow metadata describing the physical
 *                                      layout of narrow dump buffers.
 *                                      For backward compatibility, narrow
 *                                      metadata only supports 64 counters per
 *                                      block and 32 bits per block entry.
 * @metadata:       Non-NULL pointer to the metadata before narrowing to 32
 *                  bits per block entry; it has 64 counters per block and
 *                  64 bits per value.
 * @dump_buf_bytes: The size in bytes of the dump buffer after each block entry
 *                  has been narrowed from 64 bits to 32 bits.
 */
struct kbase_hwcnt_metadata_narrow {
	const struct kbase_hwcnt_metadata *metadata;
	size_t dump_buf_bytes;
};

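/*
 * Illustrative sizing sketch (the figures below are assumed purely for
 * illustration, not taken from any particular GPU): a narrow block holds at
 * most 64 entries, so a block type with 4 header values, 60 counters and a
 * single block instance gives a narrow dump buffer of 64 u32 values, i.e.
 * dump_buf_bytes would be 64 * sizeof(u32) = 256 bytes.
 */
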
/**
 * struct kbase_hwcnt_dump_buffer_narrow - Hardware counter narrow dump buffer.
 * @md_narrow:   Non-NULL pointer to narrow metadata used to identify, and to
 *               describe the layout of, the narrow dump buffer.
 * @dump_buf:    Non-NULL pointer to an array of u32 values whose size in bytes
 *               is md_narrow->dump_buf_bytes.
 * @clk_cnt_buf: A pointer to an array of u64 values holding the cycle count
 *               elapsed for each clock domain.
 */
struct kbase_hwcnt_dump_buffer_narrow {
	const struct kbase_hwcnt_metadata_narrow *md_narrow;
	u32 *dump_buf;
	u64 *clk_cnt_buf;
};

/**
 * struct kbase_hwcnt_dump_buffer_narrow_array - Hardware counter narrow dump
 *                                               buffer array.
 * @page_addr:  Address of the first allocated page. A single allocation is
 *              used for all narrow dump buffers in the array.
 * @page_order: The allocation order of the pages; the order is on a
 *              logarithmic scale, so 2^page_order contiguous pages are
 *              allocated (e.g. a page_order of 2 describes 4 pages).
 * @buf_cnt:    The number of allocated dump buffers.
 * @bufs:       Non-NULL pointer to the array of narrow dump buffer descriptors.
 */
struct kbase_hwcnt_dump_buffer_narrow_array {
	unsigned long page_addr;
	unsigned int page_order;
	size_t buf_cnt;
	struct kbase_hwcnt_dump_buffer_narrow *bufs;
};

/**
 * kbase_hwcnt_metadata_narrow_group_count() - Get the number of groups from
 *                                             narrow metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 *
 * Return: Number of hardware counter groups described by narrow metadata.
 */
static inline size_t
kbase_hwcnt_metadata_narrow_group_count(const struct kbase_hwcnt_metadata_narrow *md_narrow)
{
	return kbase_hwcnt_metadata_group_count(md_narrow->metadata);
}

/**
 * kbase_hwcnt_metadata_narrow_group_type() - Get the arbitrary type of a group
 *                                            from narrow metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 *
 * Return: Type of the group grp.
 */
static inline u64
kbase_hwcnt_metadata_narrow_group_type(const struct kbase_hwcnt_metadata_narrow *md_narrow,
				       size_t grp)
{
	return kbase_hwcnt_metadata_group_type(md_narrow->metadata, grp);
}

/**
 * kbase_hwcnt_metadata_narrow_block_count() - Get the number of blocks in a
 *                                             group from narrow metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 *
 * Return: Number of blocks in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_narrow_block_count(const struct kbase_hwcnt_metadata_narrow *md_narrow,
					size_t grp)
{
	return kbase_hwcnt_metadata_block_count(md_narrow->metadata, grp);
}

/**
 * kbase_hwcnt_metadata_narrow_block_instance_count() - Get the number of
 *                                                      instances of a block
 *                                                      from narrow metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 * @blk:       Index of the block in the group.
 *
 * Return: Number of instances of block blk in group grp.
 */
static inline size_t kbase_hwcnt_metadata_narrow_block_instance_count(
	const struct kbase_hwcnt_metadata_narrow *md_narrow, size_t grp, size_t blk)
{
	return kbase_hwcnt_metadata_block_instance_count(md_narrow->metadata, grp, blk);
}

/**
 * kbase_hwcnt_metadata_narrow_block_headers_count() - Get the number of counter
 *                                                     headers from narrow
 *                                                     metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 * @blk:       Index of the block in the group.
 *
 * Return: Number of counter headers in each instance of block blk in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_narrow_block_headers_count(const struct kbase_hwcnt_metadata_narrow *md_narrow,
						size_t grp, size_t blk)
{
	return kbase_hwcnt_metadata_block_headers_count(md_narrow->metadata, grp, blk);
}

/**
 * kbase_hwcnt_metadata_narrow_block_counters_count() - Get the number of
 *                                                      counters from narrow
 *                                                      metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 * @blk:       Index of the block in the group.
 *
 * Return: Number of counters in each instance of block blk in group grp.
 */
static inline size_t kbase_hwcnt_metadata_narrow_block_counters_count(
	const struct kbase_hwcnt_metadata_narrow *md_narrow, size_t grp, size_t blk)
{
	return kbase_hwcnt_metadata_block_counters_count(md_narrow->metadata, grp, blk);
}

/**
 * kbase_hwcnt_metadata_narrow_block_values_count() - Get the number of values
 *                                                    from narrow metadata.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @grp:       Index of the group in the narrow metadata.
 * @blk:       Index of the block in the group.
 *
 * Return: Number of headers plus counters in each instance of block blk
 *         in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_narrow_block_values_count(const struct kbase_hwcnt_metadata_narrow *md_narrow,
					       size_t grp, size_t blk)
{
	return kbase_hwcnt_metadata_narrow_block_counters_count(md_narrow, grp, blk) +
	       kbase_hwcnt_metadata_narrow_block_headers_count(md_narrow, grp, blk);
}

/**
 * kbase_hwcnt_dump_buffer_narrow_block_instance() - Get the pointer to a
 *                                                   narrowed block instance's
 *                                                   dump buffer.
 * @buf:      Non-NULL pointer to narrow dump buffer.
 * @grp:      Index of the group in the narrow metadata.
 * @blk:      Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 *
 * Return: u32* to the dump buffer for the block instance.
 */
static inline u32 *
kbase_hwcnt_dump_buffer_narrow_block_instance(const struct kbase_hwcnt_dump_buffer_narrow *buf,
					      size_t grp, size_t blk, size_t blk_inst)
{
	return buf->dump_buf + buf->md_narrow->metadata->grp_metadata[grp].dump_buf_index +
	       buf->md_narrow->metadata->grp_metadata[grp].blk_metadata[blk].dump_buf_index +
	       (buf->md_narrow->metadata->grp_metadata[grp].blk_metadata[blk].dump_buf_stride *
		blk_inst);
}

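/*
 * Illustrative usage sketch (not part of the kbase API): iterating over every
 * value of one narrowed block instance. The grp/blk/blk_inst indices are
 * assumed to be valid for buf's metadata, and process_value() is hypothetical.
 *
 *	const u32 *blk_buf =
 *		kbase_hwcnt_dump_buffer_narrow_block_instance(buf, grp, blk, blk_inst);
 *	size_t val_cnt =
 *		kbase_hwcnt_metadata_narrow_block_values_count(buf->md_narrow, grp, blk);
 *	size_t i;
 *
 *	for (i = 0; i < val_cnt; i++)
 *		process_value(blk_buf[i]);
 */
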
/**
 * kbase_hwcnt_gpu_metadata_narrow_create() - Create HWC metadata with HWC
 *                                            entries per block truncated to
 *                                            64 entries and block entry size
 *                                            narrowed down to 32-bit.
 *
 * @dst_md_narrow: Non-NULL pointer to where created narrow metadata is stored
 *                 on success.
 * @src_md:        Non-NULL pointer to the HWC metadata used as the source to
 *                 create dst_md_narrow.
 *
 * For backward compatibility of the interface to user clients, a new metadata
 * with entries per block truncated to 64 and block entry size narrowed down
 * to 32-bit will be created for dst_md_narrow.
 * The total number of entries per block in src_md must be either 64 or 128;
 * any other value is unsupported and makes the function return an error.
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_gpu_metadata_narrow_create(const struct kbase_hwcnt_metadata_narrow **dst_md_narrow,
					   const struct kbase_hwcnt_metadata *src_md);

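/*
 * Illustrative usage sketch (assumed caller context, not part of the kbase
 * API): deriving narrow metadata from existing metadata and releasing it
 * again. src_md is assumed to be valid metadata with 64 or 128 entries per
 * block.
 *
 *	const struct kbase_hwcnt_metadata_narrow *md_narrow = NULL;
 *	int errcode = kbase_hwcnt_gpu_metadata_narrow_create(&md_narrow, src_md);
 *
 *	if (errcode)
 *		return errcode;
 *	... use md_narrow ...
 *	kbase_hwcnt_gpu_metadata_narrow_destroy(md_narrow);
 */
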
/**
 * kbase_hwcnt_gpu_metadata_narrow_destroy() - Destroy a hardware counter narrow
 *                                             metadata object.
 * @md_narrow: Pointer to hardware counter narrow metadata.
 */
void kbase_hwcnt_gpu_metadata_narrow_destroy(const struct kbase_hwcnt_metadata_narrow *md_narrow);

/**
 * kbase_hwcnt_dump_buffer_narrow_alloc() - Allocate a narrow dump buffer.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @dump_buf:  Non-NULL pointer to narrow dump buffer to be initialised. Will be
 *             initialised to undefined values, so must be used as a copy
 *             destination, or cleared before use.
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_dump_buffer_narrow_alloc(const struct kbase_hwcnt_metadata_narrow *md_narrow,
					 struct kbase_hwcnt_dump_buffer_narrow *dump_buf);

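/*
 * Illustrative usage sketch (not part of the kbase API): allocating a narrow
 * dump buffer, clearing it before first use since its initial contents are
 * undefined, and freeing it afterwards. md_narrow is assumed valid.
 *
 *	struct kbase_hwcnt_dump_buffer_narrow dump_buf = { 0 };
 *	int errcode = kbase_hwcnt_dump_buffer_narrow_alloc(md_narrow, &dump_buf);
 *
 *	if (errcode)
 *		return errcode;
 *	memset(dump_buf.dump_buf, 0, md_narrow->dump_buf_bytes);
 *	... use dump_buf as a copy destination ...
 *	kbase_hwcnt_dump_buffer_narrow_free(&dump_buf);
 */
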
/**
 * kbase_hwcnt_dump_buffer_narrow_free() - Free a narrow dump buffer.
 * @dump_buf: Dump buffer to be freed.
 *
 * Can be safely called on an all-zeroed narrow dump buffer structure, or on an
 * already freed narrow dump buffer.
 */
void kbase_hwcnt_dump_buffer_narrow_free(struct kbase_hwcnt_dump_buffer_narrow *dump_buf);

/**
 * kbase_hwcnt_dump_buffer_narrow_array_alloc() - Allocate an array of narrow
 *                                                dump buffers.
 * @md_narrow: Non-NULL pointer to narrow metadata.
 * @n:         Number of narrow dump buffers to allocate.
 * @dump_bufs: Non-NULL pointer to a kbase_hwcnt_dump_buffer_narrow_array
 *             object to be initialised.
 *
 * A single zeroed contiguous page allocation will be used for all of the
 * buffers inside the object, where:
 * dump_bufs->bufs[n].dump_buf == page_addr + n * md_narrow->dump_buf_bytes
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_dump_buffer_narrow_array_alloc(
	const struct kbase_hwcnt_metadata_narrow *md_narrow, size_t n,
	struct kbase_hwcnt_dump_buffer_narrow_array *dump_bufs);

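/*
 * Illustrative usage sketch (not part of the kbase API): allocating an array
 * of narrow dump buffers and visiting each one. The buffer count of 8 and
 * use_narrow_buffer() are assumed purely for illustration.
 *
 *	struct kbase_hwcnt_dump_buffer_narrow_array dump_bufs = { 0 };
 *	int errcode = kbase_hwcnt_dump_buffer_narrow_array_alloc(md_narrow, 8, &dump_bufs);
 *	size_t i;
 *
 *	if (errcode)
 *		return errcode;
 *	for (i = 0; i < dump_bufs.buf_cnt; i++)
 *		use_narrow_buffer(&dump_bufs.bufs[i]);
 *	kbase_hwcnt_dump_buffer_narrow_array_free(&dump_bufs);
 */
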
/**
 * kbase_hwcnt_dump_buffer_narrow_array_free() - Free a narrow dump buffer
 *                                               array.
 * @dump_bufs: Narrow dump buffer array to be freed.
 *
 * Can be safely called on an all-zeroed narrow dump buffer array structure, or
 * on an already freed narrow dump buffer array.
 */
void kbase_hwcnt_dump_buffer_narrow_array_free(
	struct kbase_hwcnt_dump_buffer_narrow_array *dump_bufs);

/**
 * kbase_hwcnt_dump_buffer_block_copy_strict_narrow() - Copy all enabled block
 *                                                      values from source to
 *                                                      destination.
 * @dst_blk: Non-NULL pointer to destination block obtained from a call to
 *           kbase_hwcnt_dump_buffer_narrow_block_instance.
 * @src_blk: Non-NULL pointer to source block obtained from a call to
 *           kbase_hwcnt_dump_buffer_block_instance.
 * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
 *           kbase_hwcnt_enable_map_block_instance.
 * @val_cnt: Number of values in the block.
 *
 * After the copy, any disabled values in the destination will be zero. Each
 * enabled value in the destination is saturated at U32_MAX if the
 * corresponding source value is greater than U32_MAX, or copied from the
 * source if the corresponding source value is less than or equal to U32_MAX.
 */
void kbase_hwcnt_dump_buffer_block_copy_strict_narrow(u32 *dst_blk, const u64 *src_blk,
						      const u64 *blk_em, size_t val_cnt);

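/*
 * Illustrative per-value semantics sketch (an equivalent scalar form of the
 * rule above, not the driver's implementation): each destination value is
 * either zeroed or narrowed with saturation depending on its enable bit.
 * "enabled" stands for the bit taken from the blk_em bitfield(s) for value i.
 *
 *	u64 src_val = src_blk[i];
 *
 *	if (!enabled)
 *		dst_blk[i] = 0;
 *	else
 *		dst_blk[i] = (src_val > U32_MAX) ? U32_MAX : (u32)src_val;
 */
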
/**
 * kbase_hwcnt_dump_buffer_copy_strict_narrow() - Copy all enabled values to a
 *                                                narrow dump buffer.
 * @dst_narrow:     Non-NULL pointer to destination dump buffer.
 * @src:            Non-NULL pointer to source dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * After the operation, all non-enabled values (including padding bytes) will be
 * zero. Slower than the non-strict variant.
 *
 * Each enabled value in dst_narrow is saturated at U32_MAX if the
 * corresponding source value is greater than U32_MAX, or copied from the
 * source if the corresponding source value is less than or equal to U32_MAX.
 */
void kbase_hwcnt_dump_buffer_copy_strict_narrow(struct kbase_hwcnt_dump_buffer_narrow *dst_narrow,
						const struct kbase_hwcnt_dump_buffer *src,
						const struct kbase_hwcnt_enable_map *dst_enable_map);

#endif /* _KBASE_HWCNT_GPU_NARROW_H_ */