/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018, 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Hardware counter types.
 * Contains structures for describing the physical layout of hardware counter
 * dump buffers and enable maps within a system.
 *
 * Also contains helper functions for manipulation of these dump buffers and
 * enable maps.
 *
 * Through use of these structures and functions, hardware counters can be
 * enabled, copied, accumulated, and generally manipulated in a generic way,
 * regardless of the physical counter dump layout.
 *
 * Terminology:
 *
 * Hardware Counter System:
 *    A collection of hardware counter groups, making a full hardware counter
 *    system.
 * Hardware Counter Group:
 *    A group of Hardware Counter Blocks (e.g. a t62x might have more than one
 *    core group, so has one counter group per core group, where each group
 *    may have a different number and layout of counter blocks).
 * Hardware Counter Block:
 *    A block of hardware counters (e.g. shader block, tiler block).
 * Hardware Counter Block Instance:
 *    An instance of a Hardware Counter Block (e.g. an MP4 GPU might have
 *    4 shader block instances).
 *
 * Block Header:
 *    A header value inside a counter block. Headers don't count anything,
 *    so it is only valid to copy or zero them. Headers are always the first
 *    values in the block.
 * Block Counter:
 *    A counter value inside a counter block. Counters can be zeroed, copied,
 *    or accumulated. Counters are always immediately after the headers in the
 *    block.
 * Block Value:
 *    A catch-all term for block headers and block counters.
 *
 * Enable Map:
 *    An array of u64 bitfields, where each bit either enables exactly one
 *    block value, or is unused (padding).
 * Dump Buffer:
 *    An array of u64 values, where each u64 corresponds either to one block
 *    value, or is unused (padding).
 * Availability Mask:
 *    A bitfield, where each bit corresponds to whether a block instance is
 *    physically available (e.g. an MP3 GPU may have a sparse core mask of
 *    0b1011, meaning it only has 3 cores but for hardware counter dumps has the
 *    same dump buffer layout as an MP4 GPU with a core mask of 0b1111. In this
 *    case, the availability mask might be 0b1011111 (the exact layout will
 *    depend on the specific hardware architecture), with the 3 extra early bits
 *    corresponding to other block instances in the hardware counter system).
 * Metadata:
 *    Structure describing the physical layout of the enable map and dump buffers
 *    for a specific hardware counter system.
 *
 */

#ifndef _KBASE_HWCNT_TYPES_H_
#define _KBASE_HWCNT_TYPES_H_

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Number of bytes in each bitfield */
#define KBASE_HWCNT_BITFIELD_BYTES (sizeof(u64))

/* Number of bits in each bitfield */
#define KBASE_HWCNT_BITFIELD_BITS (KBASE_HWCNT_BITFIELD_BYTES * BITS_PER_BYTE)

/* Number of bytes for each counter value.
 * The driver uses 64 bits per counter so that values accumulated over a long
 * period cannot overflow, even though the hardware registers are 32-bit.
 */
#define KBASE_HWCNT_VALUE_BYTES (sizeof(u64))

/* Number of bits in an availability mask (i.e. max total number of block
 * instances supported in a Hardware Counter System)
 */
#define KBASE_HWCNT_AVAIL_MASK_BITS (sizeof(u64) * BITS_PER_BYTE)

/* Minimum alignment of each block of hardware counters */
#define KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT (KBASE_HWCNT_BITFIELD_BITS * KBASE_HWCNT_VALUE_BYTES)

/**
 * KBASE_HWCNT_ALIGN_UPWARDS() - Calculate next aligned value.
 * @value: The value to align upwards.
 * @alignment: The alignment boundary.
 *
 * Return: Input value if already aligned to the specified boundary, or next
 * (incrementing upwards) aligned value.
 */
#define KBASE_HWCNT_ALIGN_UPWARDS(value, alignment) \
        ((value) + (((alignment) - ((value) % (alignment))) % (alignment)))

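/*
 * Worked example (illustrative): KBASE_HWCNT_ALIGN_UPWARDS(100, 64)
 * evaluates to 100 + ((64 - (100 % 64)) % 64) = 100 + 28 = 128, while an
 * already aligned value such as KBASE_HWCNT_ALIGN_UPWARDS(128, 64) is
 * returned unchanged.
 */
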
/**
 * struct kbase_hwcnt_block_description - Description of one or more identical,
 * contiguous, Hardware Counter Blocks.
 * @type: The arbitrary identifier used to identify the type of the block.
 * @inst_cnt: The number of Instances of the block.
 * @hdr_cnt: The number of 64-bit Block Headers in the block.
 * @ctr_cnt: The number of 64-bit Block Counters in the block.
 */
struct kbase_hwcnt_block_description {
        u64 type;
        size_t inst_cnt;
        size_t hdr_cnt;
        size_t ctr_cnt;
};

/**
 * struct kbase_hwcnt_group_description - Description of one or more identical,
 * contiguous Hardware Counter Groups.
 * @type: The arbitrary identifier used to identify the type of the group.
 * @blk_cnt: The number of types of Hardware Counter Block in the group.
 * @blks: Non-NULL pointer to an array of blk_cnt block descriptions,
 * describing each type of Hardware Counter Block in the group.
 */
struct kbase_hwcnt_group_description {
        u64 type;
        size_t blk_cnt;
        const struct kbase_hwcnt_block_description *blks;
};

/**
 * struct kbase_hwcnt_description - Description of a Hardware Counter System.
 * @grp_cnt: The number of Hardware Counter Groups.
 * @grps: Non-NULL pointer to an array of grp_cnt group descriptions,
 * describing each Hardware Counter Group in the system.
 * @avail_mask: Flat Availability Mask for all block instances in the system.
 * @clk_cnt: The number of clock domains in the system. The maximum is 64.
 */
struct kbase_hwcnt_description {
        size_t grp_cnt;
        const struct kbase_hwcnt_group_description *grps;
        u64 avail_mask;
        u8 clk_cnt;
};

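/*
 * Illustrative sketch (hypothetical values, not a real GPU layout): a system
 * with one group containing a single block type with 4 instances, each with
 * 4 headers and 60 counters, could be described like this:
 *
 *   static const struct kbase_hwcnt_block_description blks[] = {
 *           { .type = 0x1, .inst_cnt = 4, .hdr_cnt = 4, .ctr_cnt = 60 },
 *   };
 *   static const struct kbase_hwcnt_group_description grp = {
 *           .type = 0x0, .blk_cnt = ARRAY_SIZE(blks), .blks = blks,
 *   };
 *   static const struct kbase_hwcnt_description desc = {
 *           .grp_cnt = 1, .grps = &grp, .avail_mask = 0xF, .clk_cnt = 1,
 *   };
 */
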
/**
 * struct kbase_hwcnt_block_metadata - Metadata describing the physical layout
 * of a block in a Hardware Counter System's
 * Dump Buffers and Enable Maps.
 * @type: The arbitrary identifier used to identify the type of the
 * block.
 * @inst_cnt: The number of Instances of the block.
 * @hdr_cnt: The number of 64-bit Block Headers in the block.
 * @ctr_cnt: The number of 64-bit Block Counters in the block.
 * @enable_map_index: Index in u64s into the parent's Enable Map where the
 * Enable Map bitfields of the Block Instances described by
 * this metadata start.
 * @enable_map_stride: Stride in u64s between the Enable Maps of each of the
 * Block Instances described by this metadata.
 * @dump_buf_index: Index in u64s into the parent's Dump Buffer where the
 * Dump Buffers of the Block Instances described by this
 * metadata start.
 * @dump_buf_stride: Stride in u64s between the Dump Buffers of each of the
 * Block Instances described by this metadata.
 * @avail_mask_index: Index in bits into the parent's Availability Mask where
 * the Availability Masks of the Block Instances described
 * by this metadata start.
 */
struct kbase_hwcnt_block_metadata {
        u64 type;
        size_t inst_cnt;
        size_t hdr_cnt;
        size_t ctr_cnt;
        size_t enable_map_index;
        size_t enable_map_stride;
        size_t dump_buf_index;
        size_t dump_buf_stride;
        size_t avail_mask_index;
};

/**
 * struct kbase_hwcnt_group_metadata - Metadata describing the physical layout
 * of a group of blocks in a Hardware
 * Counter System's Dump Buffers and Enable
 * Maps.
 * @type: The arbitrary identifier used to identify the type of the
 * group.
 * @blk_cnt: The number of types of Hardware Counter Block in the
 * group.
 * @blk_metadata: Non-NULL pointer to an array of blk_cnt block metadata,
 * describing the physical layout of each type of Hardware
 * Counter Block in the group.
 * @enable_map_index: Index in u64s into the parent's Enable Map where the
 * Enable Maps of the blocks within the group described by
 * this metadata start.
 * @dump_buf_index: Index in u64s into the parent's Dump Buffer where the
 * Dump Buffers of the blocks within the group described by
 * this metadata start.
 * @avail_mask_index: Index in bits into the parent's Availability Mask where
 * the Availability Masks of the blocks within the group
 * described by this metadata start.
 */
struct kbase_hwcnt_group_metadata {
        u64 type;
        size_t blk_cnt;
        const struct kbase_hwcnt_block_metadata *blk_metadata;
        size_t enable_map_index;
        size_t dump_buf_index;
        size_t avail_mask_index;
};

/**
 * struct kbase_hwcnt_metadata - Metadata describing the memory layout
 * of Dump Buffers and Enable Maps within a
 * Hardware Counter System.
 * @grp_cnt: The number of Hardware Counter Groups.
 * @grp_metadata: Non-NULL pointer to an array of grp_cnt group metadata,
 * describing the physical layout of each Hardware Counter
 * Group in the system.
 * @enable_map_bytes: The size in bytes of an Enable Map needed for the system.
 * @dump_buf_bytes: The size in bytes of a Dump Buffer needed for the system.
 * @avail_mask: The Availability Mask for the system.
 * @clk_cnt: The number of clock domains in the system.
 */
struct kbase_hwcnt_metadata {
        size_t grp_cnt;
        const struct kbase_hwcnt_group_metadata *grp_metadata;
        size_t enable_map_bytes;
        size_t dump_buf_bytes;
        u64 avail_mask;
        u8 clk_cnt;
};

/**
 * struct kbase_hwcnt_enable_map - Hardware Counter Enable Map. Array of u64
 * bitfields.
 * @metadata: Non-NULL pointer to metadata used to identify, and to describe
 * the layout of the enable map.
 * @hwcnt_enable_map: Non-NULL pointer of size metadata->enable_map_bytes to an
 * array of u64 bitfields, each bit of which enables one hardware
 * counter.
 * @clk_enable_map: A u64 bitfield, each bit of which enables the cycle
 * counter for a given clock domain.
 */
struct kbase_hwcnt_enable_map {
        const struct kbase_hwcnt_metadata *metadata;
        u64 *hwcnt_enable_map;
        u64 clk_enable_map;
};

/**
 * struct kbase_hwcnt_dump_buffer - Hardware Counter Dump Buffer.
 * @metadata: Non-NULL pointer to metadata used to identify, and to describe
 * the layout of the Dump Buffer.
 * @dump_buf: Non-NULL pointer to an array of u64 values; the array size is
 * metadata->dump_buf_bytes.
 * @clk_cnt_buf: A pointer to an array of u64 values holding the cycle count
 * elapsed for each clock domain.
 */
struct kbase_hwcnt_dump_buffer {
        const struct kbase_hwcnt_metadata *metadata;
        u64 *dump_buf;
        u64 *clk_cnt_buf;
};

/**
 * struct kbase_hwcnt_dump_buffer_array - Hardware Counter Dump Buffer array.
 * @page_addr: Address of allocated pages. A single allocation is used for all
 * Dump Buffers in the array.
 * @page_order: The allocation order of the pages (i.e. log2 of the number of
 * allocated pages).
 * @buf_cnt: The number of allocated Dump Buffers.
 * @bufs: Non-NULL pointer to the array of Dump Buffers.
 */
struct kbase_hwcnt_dump_buffer_array {
        unsigned long page_addr;
        unsigned int page_order;
        size_t buf_cnt;
        struct kbase_hwcnt_dump_buffer *bufs;
};

/**
 * kbase_hwcnt_metadata_create() - Create a hardware counter metadata object
 * from a description.
 * @desc: Non-NULL pointer to a hardware counter description.
 * @metadata: Non-NULL pointer to where created metadata will be stored on
 * success.
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_metadata_create(const struct kbase_hwcnt_description *desc,
                                const struct kbase_hwcnt_metadata **metadata);

/**
 * kbase_hwcnt_metadata_destroy() - Destroy a hardware counter metadata object.
 * @metadata: Pointer to hardware counter metadata
 */
void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata);

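/*
 * Typical lifetime (sketch, reusing the hypothetical description above):
 *
 *   const struct kbase_hwcnt_metadata *md;
 *   int err = kbase_hwcnt_metadata_create(&desc, &md);
 *
 *   if (!err) {
 *           ... use md to allocate enable maps and dump buffers ...
 *           kbase_hwcnt_metadata_destroy(md);
 *   }
 */
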
/**
 * kbase_hwcnt_metadata_group_count() - Get the number of groups.
 * @metadata: Non-NULL pointer to metadata.
 *
 * Return: Number of hardware counter groups described by metadata.
 */
static inline size_t kbase_hwcnt_metadata_group_count(const struct kbase_hwcnt_metadata *metadata)
{
        if (WARN_ON(!metadata))
                return 0;

        return metadata->grp_cnt;
}

/**
 * kbase_hwcnt_metadata_group_type() - Get the arbitrary type of a group.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 *
 * Return: Type of the group grp.
 */
static inline u64 kbase_hwcnt_metadata_group_type(const struct kbase_hwcnt_metadata *metadata,
                                                  size_t grp)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt))
                return 0;

        return metadata->grp_metadata[grp].type;
}

/**
 * kbase_hwcnt_metadata_block_count() - Get the number of blocks in a group.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 *
 * Return: Number of blocks in group grp.
 */
static inline size_t kbase_hwcnt_metadata_block_count(const struct kbase_hwcnt_metadata *metadata,
                                                      size_t grp)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_cnt;
}

/**
 * kbase_hwcnt_metadata_block_type() - Get the arbitrary type of a block.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Type of the block blk in group grp.
 */
static inline u64 kbase_hwcnt_metadata_block_type(const struct kbase_hwcnt_metadata *metadata,
                                                  size_t grp, size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_metadata[blk].type;
}

/**
 * kbase_hwcnt_metadata_block_instance_count() - Get the number of instances of
 * a block.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Number of instances of block blk in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_block_instance_count(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                          size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_metadata[blk].inst_cnt;
}

/**
 * kbase_hwcnt_metadata_block_headers_count() - Get the number of counter
 * headers.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Number of counter headers in each instance of block blk in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_block_headers_count(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                         size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_metadata[blk].hdr_cnt;
}

/**
 * kbase_hwcnt_metadata_block_counters_count() - Get the number of counters.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Number of counters in each instance of block blk in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_block_counters_count(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                          size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_metadata[blk].ctr_cnt;
}

/**
 * kbase_hwcnt_metadata_block_enable_map_stride() - Get the enable map stride.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Enable map stride in each instance of block blk in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_block_enable_map_stride(const struct kbase_hwcnt_metadata *metadata,
                                             size_t grp, size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].blk_metadata[blk].enable_map_stride;
}

/**
 * kbase_hwcnt_metadata_block_values_count() - Get the number of values.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: Number of headers plus counters in each instance of block blk
 * in group grp.
 */
static inline size_t
kbase_hwcnt_metadata_block_values_count(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                        size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return kbase_hwcnt_metadata_block_counters_count(metadata, grp, blk) +
               kbase_hwcnt_metadata_block_headers_count(metadata, grp, blk);
}

/**
 * kbase_hwcnt_metadata_for_each_block() - Iterate over each block instance in
 * the metadata.
 * @md: Non-NULL pointer to metadata.
 * @grp: size_t variable used as group iterator.
 * @blk: size_t variable used as block iterator.
 * @blk_inst: size_t variable used as block instance iterator.
 *
 * Iteration order is group, then block, then block instance (i.e. linearly
 * through memory).
 */
#define kbase_hwcnt_metadata_for_each_block(md, grp, blk, blk_inst) \
        for ((grp) = 0; (grp) < kbase_hwcnt_metadata_group_count((md)); (grp)++) \
                for ((blk) = 0; (blk) < kbase_hwcnt_metadata_block_count((md), (grp)); (blk)++) \
                        for ((blk_inst) = 0; \
                             (blk_inst) < \
                                     kbase_hwcnt_metadata_block_instance_count((md), (grp), (blk)); \
                             (blk_inst)++)

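/*
 * Example use of the iterator (sketch): counting every block instance
 * described by a metadata object.
 *
 *   size_t grp, blk, blk_inst;
 *   size_t total = 0;
 *
 *   kbase_hwcnt_metadata_for_each_block(md, grp, blk, blk_inst)
 *           total++;
 */
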
/**
 * kbase_hwcnt_metadata_block_avail_bit() - Get the bit index into the avail
 * mask corresponding to the block.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 *
 * Return: The bit index into the avail mask for the block.
 */
static inline size_t
kbase_hwcnt_metadata_block_avail_bit(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                     size_t blk)
{
        if (WARN_ON(!metadata) || WARN_ON(grp >= metadata->grp_cnt) ||
            WARN_ON(blk >= metadata->grp_metadata[grp].blk_cnt))
                return 0;

        return metadata->grp_metadata[grp].avail_mask_index +
               metadata->grp_metadata[grp].blk_metadata[blk].avail_mask_index;
}

/**
 * kbase_hwcnt_metadata_block_instance_avail() - Check if a block instance is
 * available.
 * @metadata: Non-NULL pointer to metadata.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 *
 * Return: true if the block instance is available, else false.
 */
static inline bool
kbase_hwcnt_metadata_block_instance_avail(const struct kbase_hwcnt_metadata *metadata, size_t grp,
                                          size_t blk, size_t blk_inst)
{
        size_t bit;
        u64 mask;

        if (WARN_ON(!metadata))
                return false;

        bit = kbase_hwcnt_metadata_block_avail_bit(metadata, grp, blk) + blk_inst;
        mask = 1ull << bit;

        return (metadata->avail_mask & mask) != 0;
}
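
/*
 * Worked example (hypothetical layout): with an avail_mask of 0b1011111 and
 * kbase_hwcnt_metadata_block_avail_bit() returning 3 for a 4-instance block,
 * instances 0, 1 and 3 (mask bits 3, 4 and 6) are available, while instance 2
 * (mask bit 5, which is clear) is not, matching the sparse MP3 core mask
 * example in the terminology comment above.
 */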

/**
 * kbase_hwcnt_enable_map_alloc() - Allocate an enable map.
 * @metadata: Non-NULL pointer to metadata describing the system.
 * @enable_map: Non-NULL pointer to enable map to be initialised. Will be
 * initialised to all zeroes (i.e. all counters disabled).
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_enable_map_alloc(const struct kbase_hwcnt_metadata *metadata,
                                 struct kbase_hwcnt_enable_map *enable_map);

/**
 * kbase_hwcnt_enable_map_free() - Free an enable map.
 * @enable_map: Enable map to be freed.
 *
 * Can be safely called on an all-zeroed enable map structure, or on an already
 * freed enable map.
 */
void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map);

/**
 * kbase_hwcnt_enable_map_block_instance() - Get the pointer to a block
 * instance's enable map.
 * @map: Non-NULL pointer to enable map.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 *
 * Return: u64* to the bitfield(s) used as the enable map for the
 * block instance.
 */
static inline u64 *kbase_hwcnt_enable_map_block_instance(const struct kbase_hwcnt_enable_map *map,
                                                         size_t grp, size_t blk, size_t blk_inst)
{
        if (WARN_ON(!map) || WARN_ON(!map->hwcnt_enable_map))
                return NULL;

        if (WARN_ON(!map->metadata) || WARN_ON(grp >= map->metadata->grp_cnt) ||
            WARN_ON(blk >= map->metadata->grp_metadata[grp].blk_cnt) ||
            WARN_ON(blk_inst >= map->metadata->grp_metadata[grp].blk_metadata[blk].inst_cnt))
                return map->hwcnt_enable_map;

        return map->hwcnt_enable_map + map->metadata->grp_metadata[grp].enable_map_index +
               map->metadata->grp_metadata[grp].blk_metadata[blk].enable_map_index +
               (map->metadata->grp_metadata[grp].blk_metadata[blk].enable_map_stride * blk_inst);
}

/**
 * kbase_hwcnt_bitfield_count() - Calculate the number of u64 bitfields required
 * to have at minimum one bit per value.
 * @val_cnt: Number of values.
 *
 * Return: Number of required bitfields.
 */
static inline size_t kbase_hwcnt_bitfield_count(size_t val_cnt)
{
        return (val_cnt + KBASE_HWCNT_BITFIELD_BITS - 1) / KBASE_HWCNT_BITFIELD_BITS;
}

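/*
 * Worked example: a block with 70 values needs
 * kbase_hwcnt_bitfield_count(70) = (70 + 63) / 64 = 2 u64 bitfields; the
 * last 58 bits of the second bitfield are unused padding.
 */
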
/**
 * kbase_hwcnt_enable_map_block_disable_all() - Disable all values in a block.
 * @dst: Non-NULL pointer to enable map.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 */
static inline void kbase_hwcnt_enable_map_block_disable_all(struct kbase_hwcnt_enable_map *dst,
                                                            size_t grp, size_t blk, size_t blk_inst)
{
        size_t val_cnt;
        size_t bitfld_cnt;
        u64 *const block_enable_map =
                kbase_hwcnt_enable_map_block_instance(dst, grp, blk, blk_inst);

        if (WARN_ON(!dst))
                return;

        val_cnt = kbase_hwcnt_metadata_block_values_count(dst->metadata, grp, blk);
        bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);

        memset(block_enable_map, 0, bitfld_cnt * KBASE_HWCNT_BITFIELD_BYTES);
}

/**
 * kbase_hwcnt_enable_map_disable_all() - Disable all values in the enable map.
 * @dst: Non-NULL pointer to enable map to zero.
 */
static inline void kbase_hwcnt_enable_map_disable_all(struct kbase_hwcnt_enable_map *dst)
{
        if (WARN_ON(!dst) || WARN_ON(!dst->metadata))
                return;

        if (dst->hwcnt_enable_map != NULL)
                memset(dst->hwcnt_enable_map, 0, dst->metadata->enable_map_bytes);

        dst->clk_enable_map = 0;
}

/**
 * kbase_hwcnt_enable_map_block_enable_all() - Enable all values in a block.
 * @dst: Non-NULL pointer to enable map.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 */
static inline void kbase_hwcnt_enable_map_block_enable_all(struct kbase_hwcnt_enable_map *dst,
                                                           size_t grp, size_t blk, size_t blk_inst)
{
        size_t val_cnt;
        size_t bitfld_cnt;
        u64 *const block_enable_map =
                kbase_hwcnt_enable_map_block_instance(dst, grp, blk, blk_inst);
        size_t bitfld_idx;

        if (WARN_ON(!dst))
                return;

        val_cnt = kbase_hwcnt_metadata_block_values_count(dst->metadata, grp, blk);
        bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);

        for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
                const u64 remaining_values = val_cnt - (bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
                u64 block_enable_map_mask = U64_MAX;

                if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
                        block_enable_map_mask = (1ull << remaining_values) - 1;

                block_enable_map[bitfld_idx] = block_enable_map_mask;
        }
}

/**
 * kbase_hwcnt_enable_map_enable_all() - Enable all values in an enable
 * map.
 * @dst: Non-NULL pointer to enable map.
 */
static inline void kbase_hwcnt_enable_map_enable_all(struct kbase_hwcnt_enable_map *dst)
{
        size_t grp, blk, blk_inst;

        if (WARN_ON(!dst) || WARN_ON(!dst->metadata))
                return;

        kbase_hwcnt_metadata_for_each_block(dst->metadata, grp, blk, blk_inst)
                kbase_hwcnt_enable_map_block_enable_all(dst, grp, blk, blk_inst);

        dst->clk_enable_map = (1ull << dst->metadata->clk_cnt) - 1;
}

/**
 * kbase_hwcnt_enable_map_copy() - Copy an enable map to another.
 * @dst: Non-NULL pointer to destination enable map.
 * @src: Non-NULL pointer to source enable map.
 *
 * The dst and src MUST have been created from the same metadata.
 */
static inline void kbase_hwcnt_enable_map_copy(struct kbase_hwcnt_enable_map *dst,
                                               const struct kbase_hwcnt_enable_map *src)
{
        if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst->metadata) ||
            WARN_ON(dst->metadata != src->metadata))
                return;

        if (dst->hwcnt_enable_map != NULL) {
                if (WARN_ON(!src->hwcnt_enable_map))
                        return;

                memcpy(dst->hwcnt_enable_map, src->hwcnt_enable_map,
                       dst->metadata->enable_map_bytes);
        }

        dst->clk_enable_map = src->clk_enable_map;
}

/**
 * kbase_hwcnt_enable_map_union() - Union dst and src enable maps into dst.
 * @dst: Non-NULL pointer to destination enable map.
 * @src: Non-NULL pointer to source enable map.
 *
 * The dst and src MUST have been created from the same metadata.
 */
static inline void kbase_hwcnt_enable_map_union(struct kbase_hwcnt_enable_map *dst,
                                                const struct kbase_hwcnt_enable_map *src)
{
        if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst->metadata) ||
            WARN_ON(dst->metadata != src->metadata))
                return;

        if (dst->hwcnt_enable_map != NULL) {
                size_t i;
                size_t const bitfld_count =
                        dst->metadata->enable_map_bytes / KBASE_HWCNT_BITFIELD_BYTES;

                if (WARN_ON(!src->hwcnt_enable_map))
                        return;

                for (i = 0; i < bitfld_count; i++)
                        dst->hwcnt_enable_map[i] |= src->hwcnt_enable_map[i];
        }

        dst->clk_enable_map |= src->clk_enable_map;
}

/**
 * kbase_hwcnt_enable_map_block_enabled() - Check if any values in a block
 * instance are enabled.
 * @enable_map: Non-NULL pointer to enable map.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 *
 * Return: true if any values in the block are enabled, else false.
 */
static inline bool
kbase_hwcnt_enable_map_block_enabled(const struct kbase_hwcnt_enable_map *enable_map, size_t grp,
                                     size_t blk, size_t blk_inst)
{
        bool any_enabled = false;
        size_t val_cnt;
        size_t bitfld_cnt;
        const u64 *const block_enable_map =
                kbase_hwcnt_enable_map_block_instance(enable_map, grp, blk, blk_inst);
        size_t bitfld_idx;

        if (WARN_ON(!enable_map))
                return false;

        val_cnt = kbase_hwcnt_metadata_block_values_count(enable_map->metadata, grp, blk);
        bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);

        for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
                const u64 remaining_values = val_cnt - (bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
                u64 block_enable_map_mask = U64_MAX;

                if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
                        block_enable_map_mask = (1ull << remaining_values) - 1;

                any_enabled = any_enabled || (block_enable_map[bitfld_idx] & block_enable_map_mask);
        }

        return any_enabled;
}

/**
 * kbase_hwcnt_enable_map_any_enabled() - Check if any values are enabled.
 * @enable_map: Non-NULL pointer to enable map.
 *
 * Return: true if any values are enabled, else false.
 */
static inline bool
kbase_hwcnt_enable_map_any_enabled(const struct kbase_hwcnt_enable_map *enable_map)
{
        size_t grp, blk, blk_inst;
        u64 clk_enable_map_mask;

        if (WARN_ON(!enable_map) || WARN_ON(!enable_map->metadata))
                return false;

        clk_enable_map_mask = (1ull << enable_map->metadata->clk_cnt) - 1;

        if (enable_map->metadata->clk_cnt > 0 && (enable_map->clk_enable_map & clk_enable_map_mask))
                return true;

        kbase_hwcnt_metadata_for_each_block(enable_map->metadata, grp, blk, blk_inst)
        {
                if (kbase_hwcnt_enable_map_block_enabled(enable_map, grp, blk, blk_inst))
                        return true;
        }

        return false;
}

/**
 * kbase_hwcnt_enable_map_block_value_enabled() - Check if a value in a block
 * instance is enabled.
 * @bitfld: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @val_idx: Index of the value to check in the block instance.
 *
 * Return: true if the value was enabled, else false.
 */
static inline bool kbase_hwcnt_enable_map_block_value_enabled(const u64 *bitfld, size_t val_idx)
{
        const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
        const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
        const u64 mask = 1ull << bit;

        return (bitfld[idx] & mask) != 0;
}

/**
 * kbase_hwcnt_enable_map_block_enable_value() - Enable a value in a block
 * instance.
 * @bitfld: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @val_idx: Index of the value to enable in the block instance.
 */
static inline void kbase_hwcnt_enable_map_block_enable_value(u64 *bitfld, size_t val_idx)
{
        const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
        const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
        const u64 mask = 1ull << bit;

        bitfld[idx] |= mask;
}

/**
 * kbase_hwcnt_enable_map_block_disable_value() - Disable a value in a block
 * instance.
 * @bitfld: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @val_idx: Index of the value to disable in the block instance.
 */
static inline void kbase_hwcnt_enable_map_block_disable_value(u64 *bitfld, size_t val_idx)
{
        const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
        const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
        const u64 mask = 1ull << bit;

        bitfld[idx] &= ~mask;
}

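/*
 * Example (sketch, variable names hypothetical): enabling, testing, then
 * disabling value 5 of block instance 0, given an enable map "map" created
 * from the same metadata.
 *
 *   u64 *blk_em = kbase_hwcnt_enable_map_block_instance(&map, grp, blk, 0);
 *
 *   kbase_hwcnt_enable_map_block_enable_value(blk_em, 5);
 *   WARN_ON(!kbase_hwcnt_enable_map_block_value_enabled(blk_em, 5));
 *   kbase_hwcnt_enable_map_block_disable_value(blk_em, 5);
 */
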
/**
 * kbase_hwcnt_dump_buffer_alloc() - Allocate a dump buffer.
 * @metadata: Non-NULL pointer to metadata describing the system.
 * @dump_buf: Non-NULL pointer to dump buffer to be initialised. Will be
 * initialised to undefined values, so must be used as a copy dest,
 * or cleared before use.
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_dump_buffer_alloc(const struct kbase_hwcnt_metadata *metadata,
                                  struct kbase_hwcnt_dump_buffer *dump_buf);

/**
 * kbase_hwcnt_dump_buffer_free() - Free a dump buffer.
 * @dump_buf: Dump buffer to be freed.
 *
 * Can be safely called on an all-zeroed dump buffer structure, or on an already
 * freed dump buffer.
 */
void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf);

/**
 * kbase_hwcnt_dump_buffer_array_alloc() - Allocate an array of dump buffers.
 * @metadata: Non-NULL pointer to metadata describing the system.
 * @n: Number of dump buffers to allocate.
 * @dump_bufs: Non-NULL pointer to dump buffer array to be initialised.
 *
 * A single zeroed contiguous page allocation will be used for all of the
 * buffers inside the array, where:
 * dump_bufs[n].dump_buf == page_addr + n * metadata.dump_buf_bytes
 *
 * Return: 0 on success, else error code.
 */
int kbase_hwcnt_dump_buffer_array_alloc(const struct kbase_hwcnt_metadata *metadata, size_t n,
                                        struct kbase_hwcnt_dump_buffer_array *dump_bufs);

/**
 * kbase_hwcnt_dump_buffer_array_free() - Free a dump buffer array.
 * @dump_bufs: Dump buffer array to be freed.
 *
 * Can be safely called on an all-zeroed dump buffer array structure, or on an
 * already freed dump buffer array.
 */
void kbase_hwcnt_dump_buffer_array_free(struct kbase_hwcnt_dump_buffer_array *dump_bufs);

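/*
 * Typical usage (sketch, names hypothetical): allocating an array of 2 dump
 * buffers, using them, then freeing the whole array in one call.
 *
 *   struct kbase_hwcnt_dump_buffer_array bufs;
 *
 *   if (!kbase_hwcnt_dump_buffer_array_alloc(md, 2, &bufs)) {
 *           ... dump into bufs.bufs[0], accumulate into bufs.bufs[1] ...
 *           kbase_hwcnt_dump_buffer_array_free(&bufs);
 *   }
 */
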
/**
 * kbase_hwcnt_dump_buffer_block_instance() - Get the pointer to a block
 * instance's dump buffer.
 * @buf: Non-NULL pointer to dump buffer.
 * @grp: Index of the group in the metadata.
 * @blk: Index of the block in the group.
 * @blk_inst: Index of the block instance in the block.
 *
 * Return: u64* to the dump buffer for the block instance.
 */
static inline u64 *kbase_hwcnt_dump_buffer_block_instance(const struct kbase_hwcnt_dump_buffer *buf,
                                                          size_t grp, size_t blk, size_t blk_inst)
{
        if (WARN_ON(!buf) || WARN_ON(!buf->dump_buf))
                return NULL;

        if (WARN_ON(!buf->metadata) || WARN_ON(grp >= buf->metadata->grp_cnt) ||
            WARN_ON(blk >= buf->metadata->grp_metadata[grp].blk_cnt) ||
            WARN_ON(blk_inst >= buf->metadata->grp_metadata[grp].blk_metadata[blk].inst_cnt))
                return buf->dump_buf;

        return buf->dump_buf + buf->metadata->grp_metadata[grp].dump_buf_index +
               buf->metadata->grp_metadata[grp].blk_metadata[blk].dump_buf_index +
               (buf->metadata->grp_metadata[grp].blk_metadata[blk].dump_buf_stride * blk_inst);
}

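/*
 * Example (sketch, names hypothetical): reading the first counter (the value
 * immediately after the headers) of block instance 0 from a dump buffer.
 *
 *   size_t hdr_cnt = kbase_hwcnt_metadata_block_headers_count(md, grp, blk);
 *   u64 *blk_buf = kbase_hwcnt_dump_buffer_block_instance(buf, grp, blk, 0);
 *   u64 first_counter = blk_buf[hdr_cnt];
 */
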
/**
 * kbase_hwcnt_dump_buffer_zero() - Zero all enabled values in dst.
 * After the operation, all non-enabled values
 * will be undefined.
 * @dst: Non-NULL pointer to dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst and dst_enable_map MUST have been created from the same metadata.
 */
void kbase_hwcnt_dump_buffer_zero(struct kbase_hwcnt_dump_buffer *dst,
                                  const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_zero() - Zero all values in a block.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @val_cnt: Number of values in the block.
 */
static inline void kbase_hwcnt_dump_buffer_block_zero(u64 *dst_blk, size_t val_cnt)
{
        if (WARN_ON(!dst_blk))
                return;

        memset(dst_blk, 0, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
}

/**
 * kbase_hwcnt_dump_buffer_zero_strict() - Zero all values in dst.
 * After the operation, all values
 * (including padding bytes) will be
 * zero.
 * Slower than the non-strict variant.
 * @dst: Non-NULL pointer to dump buffer.
 */
void kbase_hwcnt_dump_buffer_zero_strict(struct kbase_hwcnt_dump_buffer *dst);

/**
 * kbase_hwcnt_dump_buffer_zero_non_enabled() - Zero all non-enabled values in
 * dst (including padding bytes and
 * unavailable blocks).
 * After the operation, all enabled
 * values will be unchanged.
 * @dst: Non-NULL pointer to dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst and dst_enable_map MUST have been created from the same metadata.
 */
void kbase_hwcnt_dump_buffer_zero_non_enabled(struct kbase_hwcnt_dump_buffer *dst,
                                              const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_zero_non_enabled() - Zero all non-enabled
 * values in a block.
 * After the operation, all
 * enabled values will be
 * unchanged.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @blk_em: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @val_cnt: Number of values in the block.
 */
static inline void kbase_hwcnt_dump_buffer_block_zero_non_enabled(u64 *dst_blk, const u64 *blk_em,
                                                                  size_t val_cnt)
{
        size_t val;

        if (WARN_ON(!dst_blk))
                return;

        for (val = 0; val < val_cnt; val++) {
                if (!kbase_hwcnt_enable_map_block_value_enabled(blk_em, val))
                        dst_blk[val] = 0;
        }
}

/**
 * kbase_hwcnt_dump_buffer_copy() - Copy all enabled values from src to dst.
 * After the operation, all non-enabled values
 * will be undefined.
 * @dst: Non-NULL pointer to dst dump buffer.
 * @src: Non-NULL pointer to src dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst, src, and dst_enable_map MUST have been created from the same
 * metadata.
 */
void kbase_hwcnt_dump_buffer_copy(struct kbase_hwcnt_dump_buffer *dst,
                                  const struct kbase_hwcnt_dump_buffer *src,
                                  const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_copy() - Copy all block values from src to dst.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @src_blk: Non-NULL pointer to src block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @val_cnt: Number of values in the block.
 */
static inline void kbase_hwcnt_dump_buffer_block_copy(u64 *dst_blk, const u64 *src_blk,
                                                      size_t val_cnt)
{
        if (WARN_ON(!dst_blk) || WARN_ON(!src_blk))
                return;

        /* Copy all the counters in the block instance.
         * Values of non-enabled counters are undefined.
         */
        memcpy(dst_blk, src_blk, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
}

/**
 * kbase_hwcnt_dump_buffer_copy_strict() - Copy all enabled values from src to
 * dst.
 * After the operation, all non-enabled
 * values (including padding bytes) will
 * be zero.
 * Slower than the non-strict variant.
 * @dst: Non-NULL pointer to dst dump buffer.
 * @src: Non-NULL pointer to src dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst, src, and dst_enable_map MUST have been created from the same
 * metadata.
 */
void kbase_hwcnt_dump_buffer_copy_strict(struct kbase_hwcnt_dump_buffer *dst,
                                         const struct kbase_hwcnt_dump_buffer *src,
                                         const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_copy_strict() - Copy all enabled block values
 * from src to dst.
 * After the operation, all
 * non-enabled values will be
 * zero.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @src_blk: Non-NULL pointer to src block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @blk_em: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @val_cnt: Number of values in the block.
 *
 * After the copy, any disabled values in dst will be zero.
 */
static inline void kbase_hwcnt_dump_buffer_block_copy_strict(u64 *dst_blk, const u64 *src_blk,
                                                             const u64 *blk_em, size_t val_cnt)
{
        size_t val;

        if (WARN_ON(!dst_blk) || WARN_ON(!src_blk))
                return;

        for (val = 0; val < val_cnt; val++) {
                bool val_enabled = kbase_hwcnt_enable_map_block_value_enabled(blk_em, val);

                dst_blk[val] = val_enabled ? src_blk[val] : 0;
        }
}

/**
 * kbase_hwcnt_dump_buffer_accumulate() - Copy all enabled headers and
 * accumulate all enabled counters from
 * src to dst.
 * After the operation, all non-enabled
 * values will be undefined.
 * @dst: Non-NULL pointer to dst dump buffer.
 * @src: Non-NULL pointer to src dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst, src, and dst_enable_map MUST have been created from the same
 * metadata.
 */
void kbase_hwcnt_dump_buffer_accumulate(struct kbase_hwcnt_dump_buffer *dst,
                                        const struct kbase_hwcnt_dump_buffer *src,
                                        const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_accumulate() - Copy all block headers and
 * accumulate all block counters
 * from src to dst.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @src_blk: Non-NULL pointer to src block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @hdr_cnt: Number of headers in the block.
 * @ctr_cnt: Number of counters in the block.
 */
static inline void kbase_hwcnt_dump_buffer_block_accumulate(u64 *dst_blk, const u64 *src_blk,
                                                            size_t hdr_cnt, size_t ctr_cnt)
{
        size_t ctr;

        if (WARN_ON(!dst_blk) || WARN_ON(!src_blk))
                return;

        /* Copy all the headers in the block instance.
         * Values of non-enabled headers are undefined.
         */
        memcpy(dst_blk, src_blk, hdr_cnt * KBASE_HWCNT_VALUE_BYTES);

        /* Accumulate all the counters in the block instance.
         * Values of non-enabled counters are undefined.
         */
        for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++)
                dst_blk[ctr] += src_blk[ctr];
}

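/*
 * Worked example: with hdr_cnt = 1 and ctr_cnt = 2, if dst holds { 7, 10, 20 }
 * and src holds { 9, 1, 2 }, accumulation leaves dst as { 9, 11, 22 }: the
 * header is copied from src, while each counter is the sum of the dst and src
 * counters.
 */
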
/**
 * kbase_hwcnt_dump_buffer_accumulate_strict() - Copy all enabled headers and
 * accumulate all enabled counters
 * from src to dst.
 * After the operation, all
 * non-enabled values (including
 * padding bytes) will be zero.
 * Slower than the non-strict
 * variant.
 * @dst: Non-NULL pointer to dst dump buffer.
 * @src: Non-NULL pointer to src dump buffer.
 * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
 *
 * The dst, src, and dst_enable_map MUST have been created from the same
 * metadata.
 */
void kbase_hwcnt_dump_buffer_accumulate_strict(struct kbase_hwcnt_dump_buffer *dst,
                                               const struct kbase_hwcnt_dump_buffer *src,
                                               const struct kbase_hwcnt_enable_map *dst_enable_map);

/**
 * kbase_hwcnt_dump_buffer_block_accumulate_strict() - Copy all enabled block
 * headers and accumulate
 * all block counters from
 * src to dst.
 * After the operation, all
 * non-enabled values will
 * be zero.
 * @dst_blk: Non-NULL pointer to dst block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @src_blk: Non-NULL pointer to src block obtained from a call to
 * kbase_hwcnt_dump_buffer_block_instance.
 * @blk_em: Non-NULL pointer to the block bitfield(s) obtained from a call to
 * kbase_hwcnt_enable_map_block_instance.
 * @hdr_cnt: Number of headers in the block.
 * @ctr_cnt: Number of counters in the block.
 */
static inline void kbase_hwcnt_dump_buffer_block_accumulate_strict(u64 *dst_blk, const u64 *src_blk,
                                                                   const u64 *blk_em,
                                                                   size_t hdr_cnt, size_t ctr_cnt)
{
        size_t ctr;

        if (WARN_ON(!dst_blk) || WARN_ON(!src_blk))
                return;

        kbase_hwcnt_dump_buffer_block_copy_strict(dst_blk, src_blk, blk_em, hdr_cnt);

        for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++) {
                bool ctr_enabled = kbase_hwcnt_enable_map_block_value_enabled(blk_em, ctr);

                if (ctr_enabled)
                        dst_blk[ctr] += src_blk[ctr];
                else
                        dst_blk[ctr] = 0;
        }
}

/**
 * kbase_hwcnt_metadata_for_each_clock() - Iterate over each clock domain in the
 * metadata.
 * @md: Non-NULL pointer to metadata.
 * @clk: size_t variable used as clock iterator.
 */
#define kbase_hwcnt_metadata_for_each_clock(md, clk) for ((clk) = 0; (clk) < (md)->clk_cnt; (clk)++)

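/*
 * Example (sketch, names hypothetical): iterating the clock domains and
 * printing each enabled cycle count from an enable map "map" and a dump
 * buffer "buf" created from the same metadata.
 *
 *   size_t clk;
 *
 *   kbase_hwcnt_metadata_for_each_clock(md, clk) {
 *           if (kbase_hwcnt_clk_enable_map_enabled(map.clk_enable_map, clk))
 *                   pr_info("clk %zu: %llu cycles\n", clk, buf->clk_cnt_buf[clk]);
 *   }
 */
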
/**
 * kbase_hwcnt_clk_enable_map_enabled() - Check if the given index is enabled
 * in clk_enable_map.
 * @clk_enable_map: An enable map for clock domains.
 * @index: Index of the clock domain to check within the enable map.
 *
 * Return: true if the index of the clock domain is enabled, else false.
 */
static inline bool kbase_hwcnt_clk_enable_map_enabled(const u64 clk_enable_map, const size_t index)
{
        if (WARN_ON(index >= 64))
                return false;
        if (clk_enable_map & (1ull << index))
                return true;
        return false;
}

#endif /* _KBASE_HWCNT_TYPES_H_ */