xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/hwcnt/mali_kbase_hwcnt_types.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018, 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "hwcnt/mali_kbase_hwcnt_types.h"

#include <linux/slab.h>

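/*
 * Create hardware counter metadata from the given description.
 *
 * The top level, group, and block metadata are bump-allocated from a single
 * kmalloc'd buffer, so the whole hierarchy can later be released by the
 * single kfree() in kbase_hwcnt_metadata_destroy().
 */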
int kbase_hwcnt_metadata_create(const struct kbase_hwcnt_description *desc,
				const struct kbase_hwcnt_metadata **out_metadata)
{
	char *buf;
	struct kbase_hwcnt_metadata *metadata;
	struct kbase_hwcnt_group_metadata *grp_mds;
	size_t grp;
	size_t enable_map_count; /* Number of u64 bitfields (inc padding) */
	size_t dump_buf_count; /* Number of u64 values (inc padding) */
	size_t avail_mask_bits; /* Number of availability mask bits */

	size_t size;
	size_t offset;

	if (!desc || !out_metadata)
		return -EINVAL;

	/* The maximum number of clock domains is 64. */
	if (desc->clk_cnt > (sizeof(u64) * BITS_PER_BYTE))
		return -EINVAL;

	/* Calculate the bytes needed to tightly pack the metadata */

	/* Top level metadata */
	size = 0;
	size += sizeof(struct kbase_hwcnt_metadata);

	/* Group metadata */
	size += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;

	/* Block metadata */
	for (grp = 0; grp < desc->grp_cnt; grp++) {
		size += sizeof(struct kbase_hwcnt_block_metadata) * desc->grps[grp].blk_cnt;
	}

	/* Single allocation for the entire metadata */
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Use the allocated memory for the metadata and its members */

	/* Bump allocate the top level metadata */
	offset = 0;
	metadata = (struct kbase_hwcnt_metadata *)(buf + offset);
	offset += sizeof(struct kbase_hwcnt_metadata);

	/* Bump allocate the group metadata */
	grp_mds = (struct kbase_hwcnt_group_metadata *)(buf + offset);
	offset += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;

	enable_map_count = 0;
	dump_buf_count = 0;
	avail_mask_bits = 0;

	for (grp = 0; grp < desc->grp_cnt; grp++) {
		size_t blk;

		const struct kbase_hwcnt_group_description *grp_desc = desc->grps + grp;
		struct kbase_hwcnt_group_metadata *grp_md = grp_mds + grp;

		size_t group_enable_map_count = 0;
		size_t group_dump_buffer_count = 0;
		size_t group_avail_mask_bits = 0;

		/* Bump allocate this group's block metadata */
		struct kbase_hwcnt_block_metadata *blk_mds =
			(struct kbase_hwcnt_block_metadata *)(buf + offset);
		offset += sizeof(struct kbase_hwcnt_block_metadata) * grp_desc->blk_cnt;

		/* Fill in the information for each block in the group */
		for (blk = 0; blk < grp_desc->blk_cnt; blk++) {
			const struct kbase_hwcnt_block_description *blk_desc = grp_desc->blks + blk;
			struct kbase_hwcnt_block_metadata *blk_md = blk_mds + blk;
			const size_t n_values = blk_desc->hdr_cnt + blk_desc->ctr_cnt;

			blk_md->type = blk_desc->type;
			blk_md->inst_cnt = blk_desc->inst_cnt;
			blk_md->hdr_cnt = blk_desc->hdr_cnt;
			blk_md->ctr_cnt = blk_desc->ctr_cnt;
			blk_md->enable_map_index = group_enable_map_count;
			blk_md->enable_map_stride = kbase_hwcnt_bitfield_count(n_values);
			blk_md->dump_buf_index = group_dump_buffer_count;
			blk_md->dump_buf_stride = KBASE_HWCNT_ALIGN_UPWARDS(
				n_values,
				(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT / KBASE_HWCNT_VALUE_BYTES));
			blk_md->avail_mask_index = group_avail_mask_bits;

			group_enable_map_count += blk_md->enable_map_stride * blk_md->inst_cnt;
			group_dump_buffer_count += blk_md->dump_buf_stride * blk_md->inst_cnt;
			group_avail_mask_bits += blk_md->inst_cnt;
		}

		/* Fill in the group's information */
		grp_md->type = grp_desc->type;
		grp_md->blk_cnt = grp_desc->blk_cnt;
		grp_md->blk_metadata = blk_mds;
		grp_md->enable_map_index = enable_map_count;
		grp_md->dump_buf_index = dump_buf_count;
		grp_md->avail_mask_index = avail_mask_bits;

		enable_map_count += group_enable_map_count;
		dump_buf_count += group_dump_buffer_count;
		avail_mask_bits += group_avail_mask_bits;
	}

	/* Fill in the top level metadata's information */
	metadata->grp_cnt = desc->grp_cnt;
	metadata->grp_metadata = grp_mds;
	metadata->enable_map_bytes = enable_map_count * KBASE_HWCNT_BITFIELD_BYTES;
	metadata->dump_buf_bytes = dump_buf_count * KBASE_HWCNT_VALUE_BYTES;
	metadata->avail_mask = desc->avail_mask;
	metadata->clk_cnt = desc->clk_cnt;

	WARN_ON(size != offset);
	/* Due to the block alignment, there should be exactly one enable map
	 * bit per counter value (KBASE_HWCNT_VALUE_BYTES bytes) in the dump
	 * buffer.
	 */
	WARN_ON(metadata->dump_buf_bytes !=
		(metadata->enable_map_bytes * BITS_PER_BYTE * KBASE_HWCNT_VALUE_BYTES));

	*out_metadata = metadata;
	return 0;
}

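/*
 * Destroy metadata previously created by kbase_hwcnt_metadata_create(). The
 * entire hierarchy lives in one allocation, so a single kfree() suffices.
 */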
void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
{
	kfree(metadata);
}

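/*
 * Allocate a zero-initialised enable map large enough for every block
 * described by the metadata. A zero-sized map is represented by a NULL
 * bitfield buffer.
 */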
int kbase_hwcnt_enable_map_alloc(const struct kbase_hwcnt_metadata *metadata,
				 struct kbase_hwcnt_enable_map *enable_map)
{
	u64 *enable_map_buf;

	if (!metadata || !enable_map)
		return -EINVAL;

	if (metadata->enable_map_bytes > 0) {
		enable_map_buf = kzalloc(metadata->enable_map_bytes, GFP_KERNEL);
		if (!enable_map_buf)
			return -ENOMEM;
	} else {
		enable_map_buf = NULL;
	}

	enable_map->metadata = metadata;
	enable_map->hwcnt_enable_map = enable_map_buf;
	return 0;
}

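/*
 * Free an enable map's bitfield buffer and clear its pointers. Safe to call
 * with NULL or on a map that has already been freed.
 */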
void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map)
{
	if (!enable_map)
		return;

	kfree(enable_map->hwcnt_enable_map);
	enable_map->hwcnt_enable_map = NULL;
	enable_map->metadata = NULL;
}

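/*
 * Allocate a single dump buffer. The counter values and the clock counter
 * values share one kmalloc'd allocation, with clk_cnt_buf placed directly
 * after the counter data.
 */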
int kbase_hwcnt_dump_buffer_alloc(const struct kbase_hwcnt_metadata *metadata,
				  struct kbase_hwcnt_dump_buffer *dump_buf)
{
	size_t dump_buf_bytes;
	size_t clk_cnt_buf_bytes;
	u8 *buf;

	if (!metadata || !dump_buf)
		return -EINVAL;

	dump_buf_bytes = metadata->dump_buf_bytes;
	clk_cnt_buf_bytes = sizeof(*dump_buf->clk_cnt_buf) * metadata->clk_cnt;

	/* Make a single allocation for both dump_buf and clk_cnt_buf. */
	buf = kmalloc(dump_buf_bytes + clk_cnt_buf_bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dump_buf->metadata = metadata;
	dump_buf->dump_buf = (u64 *)buf;
	dump_buf->clk_cnt_buf = (u64 *)(buf + dump_buf_bytes);

	return 0;
}

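/*
 * Free a dump buffer allocated by kbase_hwcnt_dump_buffer_alloc() and clear
 * the descriptor. dump_buf and clk_cnt_buf share one allocation, so only
 * dump_buf needs to be passed to kfree().
 */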
void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf)
{
	if (!dump_buf)
		return;

	kfree(dump_buf->dump_buf);
	memset(dump_buf, 0, sizeof(*dump_buf));
}

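/*
 * Allocate an array of n dump buffers. The descriptors come from
 * kmalloc_array(), while the counter and clock data for all buffers are
 * packed into one zeroed, page-order allocation: n dump buffer regions
 * followed by n clock counter regions.
 */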
int kbase_hwcnt_dump_buffer_array_alloc(const struct kbase_hwcnt_metadata *metadata, size_t n,
					struct kbase_hwcnt_dump_buffer_array *dump_bufs)
{
	struct kbase_hwcnt_dump_buffer *buffers;
	size_t buf_idx;
	unsigned int order;
	unsigned long addr;
	size_t dump_buf_bytes;
	size_t clk_cnt_buf_bytes;

	if (!metadata || !dump_bufs)
		return -EINVAL;

	dump_buf_bytes = metadata->dump_buf_bytes;
	clk_cnt_buf_bytes = sizeof(*dump_bufs->bufs->clk_cnt_buf) * metadata->clk_cnt;

	/* Allocate memory for the dump buffer struct array */
	buffers = kmalloc_array(n, sizeof(*buffers), GFP_KERNEL);
	if (!buffers)
		return -ENOMEM;

	/* Allocate pages for the actual dump buffers, as they tend to be fairly
	 * large.
	 */
	order = get_order((dump_buf_bytes + clk_cnt_buf_bytes) * n);
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!addr) {
		kfree(buffers);
		return -ENOMEM;
	}

	dump_bufs->page_addr = addr;
	dump_bufs->page_order = order;
	dump_bufs->buf_cnt = n;
	dump_bufs->bufs = buffers;

	/* Set up each dump buffer's pointers within the page allocation */
	for (buf_idx = 0; buf_idx < n; buf_idx++) {
		const size_t dump_buf_offset = dump_buf_bytes * buf_idx;
		const size_t clk_cnt_buf_offset =
			(dump_buf_bytes * n) + (clk_cnt_buf_bytes * buf_idx);

		buffers[buf_idx].metadata = metadata;
		buffers[buf_idx].dump_buf = (u64 *)(addr + dump_buf_offset);
		buffers[buf_idx].clk_cnt_buf = (u64 *)(addr + clk_cnt_buf_offset);
	}

	return 0;
}

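/*
 * Free a dump buffer array: the descriptor array, the backing pages, and the
 * contents of the array descriptor itself.
 */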
void kbase_hwcnt_dump_buffer_array_free(struct kbase_hwcnt_dump_buffer_array *dump_bufs)
{
	if (!dump_bufs)
		return;

	kfree(dump_bufs->bufs);
	free_pages(dump_bufs->page_addr, dump_bufs->page_order);
	memset(dump_bufs, 0, sizeof(*dump_bufs));
}

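/*
 * Zero all enabled blocks in the dump buffer, plus the clock counters.
 * Blocks with no enabled counters are left untouched.
 */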
void kbase_hwcnt_dump_buffer_zero(struct kbase_hwcnt_dump_buffer *dst,
				  const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;

	if (WARN_ON(!dst) || WARN_ON(!dst_enable_map) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk;
		size_t val_cnt;

		if (!kbase_hwcnt_enable_map_block_enabled(dst_enable_map, grp, blk, blk_inst))
			continue;

		dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		val_cnt = kbase_hwcnt_metadata_block_values_count(metadata, grp, blk);

		kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
	}

	memset(dst->clk_cnt_buf, 0, sizeof(*dst->clk_cnt_buf) * metadata->clk_cnt);
}

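/*
 * Zero the entire dump buffer, including padding and clock counters,
 * regardless of which counters are enabled.
 */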
void kbase_hwcnt_dump_buffer_zero_strict(struct kbase_hwcnt_dump_buffer *dst)
{
	if (WARN_ON(!dst))
		return;

	memset(dst->dump_buf, 0, dst->metadata->dump_buf_bytes);

	memset(dst->clk_cnt_buf, 0, sizeof(*dst->clk_cnt_buf) * dst->metadata->clk_cnt);
}

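/*
 * Zero every value that is not enabled in the enable map. Unavailable block
 * instances are zeroed completely; in available blocks, only values whose
 * enable map bit is clear are zeroed.
 */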
void kbase_hwcnt_dump_buffer_zero_non_enabled(struct kbase_hwcnt_dump_buffer *dst,
					      const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;

	if (WARN_ON(!dst) || WARN_ON(!dst_enable_map) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		const u64 *blk_em =
			kbase_hwcnt_enable_map_block_instance(dst_enable_map, grp, blk, blk_inst);
		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(metadata, grp, blk);

		/* Align upwards to include padding bytes */
		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(
			val_cnt, (KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT / KBASE_HWCNT_VALUE_BYTES));

		if (kbase_hwcnt_metadata_block_instance_avail(metadata, grp, blk, blk_inst)) {
			/* Block available, so only zero non-enabled values */
			kbase_hwcnt_dump_buffer_block_zero_non_enabled(dst_blk, blk_em, val_cnt);
		} else {
			/* Block not available, so zero the entire thing */
			kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
		}
	}
}

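/*
 * Copy the enabled blocks and enabled clock counters from src to dst. Values
 * in dst belonging to disabled blocks are left unchanged.
 */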
void kbase_hwcnt_dump_buffer_copy(struct kbase_hwcnt_dump_buffer *dst,
				  const struct kbase_hwcnt_dump_buffer *src,
				  const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;
	size_t clk;

	if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst_enable_map) || WARN_ON(dst == src) ||
	    WARN_ON(dst->metadata != src->metadata) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk;
		const u64 *src_blk;
		size_t val_cnt;

		if (!kbase_hwcnt_enable_map_block_enabled(dst_enable_map, grp, blk, blk_inst))
			continue;

		dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		src_blk = kbase_hwcnt_dump_buffer_block_instance(src, grp, blk, blk_inst);
		val_cnt = kbase_hwcnt_metadata_block_values_count(metadata, grp, blk);

		kbase_hwcnt_dump_buffer_block_copy(dst_blk, src_blk, val_cnt);
	}

	kbase_hwcnt_metadata_for_each_clock(metadata, clk)
	{
		if (kbase_hwcnt_clk_enable_map_enabled(dst_enable_map->clk_enable_map, clk))
			dst->clk_cnt_buf[clk] = src->clk_cnt_buf[clk];
	}
}

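/*
 * Strict copy: every value in dst is written. Enabled values are copied from
 * src; disabled values and disabled clock counters are set to zero.
 */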
void kbase_hwcnt_dump_buffer_copy_strict(struct kbase_hwcnt_dump_buffer *dst,
					 const struct kbase_hwcnt_dump_buffer *src,
					 const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;
	size_t clk;

	if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst_enable_map) || WARN_ON(dst == src) ||
	    WARN_ON(dst->metadata != src->metadata) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		const u64 *src_blk =
			kbase_hwcnt_dump_buffer_block_instance(src, grp, blk, blk_inst);
		const u64 *blk_em =
			kbase_hwcnt_enable_map_block_instance(dst_enable_map, grp, blk, blk_inst);
		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(metadata, grp, blk);
		/* Align upwards to include padding bytes */
		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(
			val_cnt, (KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT / KBASE_HWCNT_VALUE_BYTES));

		kbase_hwcnt_dump_buffer_block_copy_strict(dst_blk, src_blk, blk_em, val_cnt);
	}

	kbase_hwcnt_metadata_for_each_clock(metadata, clk)
	{
		bool clk_enabled =
			kbase_hwcnt_clk_enable_map_enabled(dst_enable_map->clk_enable_map, clk);

		dst->clk_cnt_buf[clk] = clk_enabled ? src->clk_cnt_buf[clk] : 0;
	}
}

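/*
 * Accumulate src into dst for all enabled blocks: header values are copied,
 * counter values are added. Enabled clock counters are accumulated as well;
 * disabled blocks and clocks are left unchanged.
 */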
void kbase_hwcnt_dump_buffer_accumulate(struct kbase_hwcnt_dump_buffer *dst,
					const struct kbase_hwcnt_dump_buffer *src,
					const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;
	size_t clk;

	if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst_enable_map) || WARN_ON(dst == src) ||
	    WARN_ON(dst->metadata != src->metadata) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk;
		const u64 *src_blk;
		size_t hdr_cnt;
		size_t ctr_cnt;

		if (!kbase_hwcnt_enable_map_block_enabled(dst_enable_map, grp, blk, blk_inst))
			continue;

		dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		src_blk = kbase_hwcnt_dump_buffer_block_instance(src, grp, blk, blk_inst);
		hdr_cnt = kbase_hwcnt_metadata_block_headers_count(metadata, grp, blk);
		ctr_cnt = kbase_hwcnt_metadata_block_counters_count(metadata, grp, blk);

		kbase_hwcnt_dump_buffer_block_accumulate(dst_blk, src_blk, hdr_cnt, ctr_cnt);
	}

	kbase_hwcnt_metadata_for_each_clock(metadata, clk)
	{
		if (kbase_hwcnt_clk_enable_map_enabled(dst_enable_map->clk_enable_map, clk))
			dst->clk_cnt_buf[clk] += src->clk_cnt_buf[clk];
	}
}

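/*
 * Strict accumulate: like kbase_hwcnt_dump_buffer_accumulate(), but every
 * value in dst is written. Disabled counter values and disabled clock
 * counters are set to zero rather than left unchanged.
 */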
void kbase_hwcnt_dump_buffer_accumulate_strict(struct kbase_hwcnt_dump_buffer *dst,
					       const struct kbase_hwcnt_dump_buffer *src,
					       const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata *metadata;
	size_t grp, blk, blk_inst;
	size_t clk;

	if (WARN_ON(!dst) || WARN_ON(!src) || WARN_ON(!dst_enable_map) || WARN_ON(dst == src) ||
	    WARN_ON(dst->metadata != src->metadata) ||
	    WARN_ON(dst->metadata != dst_enable_map->metadata))
		return;

	metadata = dst->metadata;

	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst)
	{
		u64 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(dst, grp, blk, blk_inst);
		const u64 *src_blk =
			kbase_hwcnt_dump_buffer_block_instance(src, grp, blk, blk_inst);
		const u64 *blk_em =
			kbase_hwcnt_enable_map_block_instance(dst_enable_map, grp, blk, blk_inst);
		size_t hdr_cnt = kbase_hwcnt_metadata_block_headers_count(metadata, grp, blk);
		size_t ctr_cnt = kbase_hwcnt_metadata_block_counters_count(metadata, grp, blk);
		/* Align upwards to include padding bytes */
		ctr_cnt = KBASE_HWCNT_ALIGN_UPWARDS(
			hdr_cnt + ctr_cnt,
			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT / KBASE_HWCNT_VALUE_BYTES) - hdr_cnt);

		kbase_hwcnt_dump_buffer_block_accumulate_strict(dst_blk, src_blk, blk_em, hdr_cnt,
								ctr_cnt);
	}

	kbase_hwcnt_metadata_for_each_clock(metadata, clk)
	{
		if (kbase_hwcnt_clk_enable_map_enabled(dst_enable_map->clk_enable_map, clk))
			dst->clk_cnt_buf[clk] += src->clk_cnt_buf[clk];
		else
			dst->clk_cnt_buf[clk] = 0;
	}
}