// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018, 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "hwcnt/mali_kbase_hwcnt_virtualizer.h"
#include "hwcnt/mali_kbase_hwcnt_accumulator.h"
#include "hwcnt/mali_kbase_hwcnt_context.h"
#include "hwcnt/mali_kbase_hwcnt_types.h"

#include <linux/mutex.h>
#include <linux/slab.h>

/**
 * struct kbase_hwcnt_virtualizer - Hardware counter virtualizer structure.
 * @hctx:              Hardware counter context being virtualized.
 * @dump_threshold_ns: Minimum period between accumulator dumps requested by
 *                     different clients. A dump requested within this period
 *                     of the previous dump is served from accumulated values
 *                     instead of performing a new accumulator dump. If 0,
 *                     rate limiting is disabled.
 * @metadata:          Hardware counter metadata.
 * @lock:              Lock acquired at all entrypoints, to protect mutable
 *                     state.
 * @client_count:      Current number of virtualizer clients.
 * @clients:           List of virtualizer clients.
 * @accum:             Hardware counter accumulator. NULL if no clients.
 * @scratch_map:       Enable map used as scratch space during counter changes.
 * @scratch_buf:       Dump buffer used as scratch space during dumps.
 * @ts_last_dump_ns:   End time of most recent dump across all clients.
 */
struct kbase_hwcnt_virtualizer {
	struct kbase_hwcnt_context *hctx;
	u64 dump_threshold_ns;
	const struct kbase_hwcnt_metadata *metadata;
	struct mutex lock;
	size_t client_count;
	struct list_head clients;
	struct kbase_hwcnt_accumulator *accum;
	struct kbase_hwcnt_enable_map scratch_map;
	struct kbase_hwcnt_dump_buffer scratch_buf;
	u64 ts_last_dump_ns;
};
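/*
 * Illustrative note (a hedged sketch, not code from this driver): the rate
 * limiting threshold is chosen by whoever creates the virtualizer, e.g.
 *
 *	// Hypothetical caller: allow at most one cross-client accumulator
 *	// dump every 200 ms; requests inside that window are served from the
 *	// per-client accumulation buffers. NSEC_PER_MSEC is the standard
 *	// kernel constant; hctx and hvirt are assumed caller variables.
 *	err = kbase_hwcnt_virtualizer_init(hctx, 200 * NSEC_PER_MSEC, &hvirt);
 */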
/**
 * struct kbase_hwcnt_virtualizer_client - Virtualizer client structure.
 * @node:        List node used for virtualizer client list.
 * @hvirt:       Hardware counter virtualizer.
 * @enable_map:  Enable map with client's current enabled counters.
 * @accum_buf:   Dump buffer with client's current accumulated counters.
 * @has_accum:   True if accum_buf contains any accumulated counters.
 * @ts_start_ns: Counter collection start time of current dump.
 */
struct kbase_hwcnt_virtualizer_client {
	struct list_head node;
	struct kbase_hwcnt_virtualizer *hvirt;
	struct kbase_hwcnt_enable_map enable_map;
	struct kbase_hwcnt_dump_buffer accum_buf;
	bool has_accum;
	u64 ts_start_ns;
};

const struct kbase_hwcnt_metadata *
kbase_hwcnt_virtualizer_metadata(struct kbase_hwcnt_virtualizer *hvirt)
{
	if (!hvirt)
		return NULL;

	return hvirt->metadata;
}

/**
 * kbasep_hwcnt_virtualizer_client_free - Free a virtualizer client's memory.
 * @hvcli: Pointer to virtualizer client.
 *
 * Will safely free a client in any partial state of construction.
 */
static void kbasep_hwcnt_virtualizer_client_free(struct kbase_hwcnt_virtualizer_client *hvcli)
{
	if (!hvcli)
		return;

	kbase_hwcnt_dump_buffer_free(&hvcli->accum_buf);
	kbase_hwcnt_enable_map_free(&hvcli->enable_map);
	kfree(hvcli);
}

/**
 * kbasep_hwcnt_virtualizer_client_alloc - Allocate memory for a virtualizer
 *                                         client.
 * @metadata:  Non-NULL pointer to counter metadata.
 * @out_hvcli: Non-NULL pointer to where created client will be stored on
 *             success.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_virtualizer_client_alloc(const struct kbase_hwcnt_metadata *metadata,
						 struct kbase_hwcnt_virtualizer_client **out_hvcli)
{
	int errcode;
	struct kbase_hwcnt_virtualizer_client *hvcli = NULL;

	WARN_ON(!metadata);
	WARN_ON(!out_hvcli);

	hvcli = kzalloc(sizeof(*hvcli), GFP_KERNEL);
	if (!hvcli)
		return -ENOMEM;

	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hvcli->enable_map);
	if (errcode)
		goto error;

	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hvcli->accum_buf);
	if (errcode)
		goto error;

	*out_hvcli = hvcli;
	return 0;
error:
	kbasep_hwcnt_virtualizer_client_free(hvcli);
	return errcode;
}

/**
 * kbasep_hwcnt_virtualizer_client_accumulate - Accumulate a dump buffer into a
 *                                              client's accumulation buffer.
 * @hvcli:    Non-NULL pointer to virtualizer client.
 * @dump_buf: Non-NULL pointer to dump buffer to accumulate from.
 */
static void
kbasep_hwcnt_virtualizer_client_accumulate(struct kbase_hwcnt_virtualizer_client *hvcli,
					   const struct kbase_hwcnt_dump_buffer *dump_buf)
{
	WARN_ON(!hvcli);
	WARN_ON(!dump_buf);
	lockdep_assert_held(&hvcli->hvirt->lock);

	if (hvcli->has_accum) {
		/* If already some accumulation, accumulate */
		kbase_hwcnt_dump_buffer_accumulate(&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
	} else {
		/* If no accumulation, copy */
		kbase_hwcnt_dump_buffer_copy(&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
	}
	hvcli->has_accum = true;
}

/**
 * kbasep_hwcnt_virtualizer_accumulator_term - Terminate the hardware counter
 *                                             accumulator after final client
 *                                             removal.
 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
 *
 * Will safely terminate the accumulator in any partial state of initialisation.
 */
static void kbasep_hwcnt_virtualizer_accumulator_term(struct kbase_hwcnt_virtualizer *hvirt)
{
	WARN_ON(!hvirt);
	lockdep_assert_held(&hvirt->lock);
	WARN_ON(hvirt->client_count);

	kbase_hwcnt_dump_buffer_free(&hvirt->scratch_buf);
	kbase_hwcnt_enable_map_free(&hvirt->scratch_map);
	kbase_hwcnt_accumulator_release(hvirt->accum);
	hvirt->accum = NULL;
}

/**
 * kbasep_hwcnt_virtualizer_accumulator_init - Initialise the hardware counter
 *                                             accumulator before first client
 *                                             addition.
 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_virtualizer_accumulator_init(struct kbase_hwcnt_virtualizer *hvirt)
{
	int errcode;

	WARN_ON(!hvirt);
	lockdep_assert_held(&hvirt->lock);
	WARN_ON(hvirt->client_count);
	WARN_ON(hvirt->accum);

	errcode = kbase_hwcnt_accumulator_acquire(hvirt->hctx, &hvirt->accum);
	if (errcode)
		goto error;

	errcode = kbase_hwcnt_enable_map_alloc(hvirt->metadata, &hvirt->scratch_map);
	if (errcode)
		goto error;

	errcode = kbase_hwcnt_dump_buffer_alloc(hvirt->metadata, &hvirt->scratch_buf);
	if (errcode)
		goto error;

	return 0;
error:
	kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
	return errcode;
}

/**
 * kbasep_hwcnt_virtualizer_client_add - Add a newly allocated client to the
 *                                       virtualizer.
 * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
 * @hvcli:      Non-NULL pointer to the virtualizer client to add.
 * @enable_map: Non-NULL pointer to client's initial enable map.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_virtualizer_client_add(struct kbase_hwcnt_virtualizer *hvirt,
					       struct kbase_hwcnt_virtualizer_client *hvcli,
					       const struct kbase_hwcnt_enable_map *enable_map)
{
	int errcode = 0;
	u64 ts_start_ns;
	u64 ts_end_ns;

	WARN_ON(!hvirt);
	WARN_ON(!hvcli);
	WARN_ON(!enable_map);
	lockdep_assert_held(&hvirt->lock);

	if (hvirt->client_count == 0)
		/* First client added, so initialise the accumulator */
		errcode = kbasep_hwcnt_virtualizer_accumulator_init(hvirt);
	if (errcode)
		return errcode;

	hvirt->client_count += 1;

	if (hvirt->client_count == 1) {
		/* First client, so just pass the enable map onwards as is */
		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum, enable_map,
							       &ts_start_ns, &ts_end_ns, NULL);
	} else {
		struct kbase_hwcnt_virtualizer_client *pos;

		/* Make the scratch enable map the union of all enable maps */
		kbase_hwcnt_enable_map_copy(&hvirt->scratch_map, enable_map);
		list_for_each_entry (pos, &hvirt->clients, node)
			kbase_hwcnt_enable_map_union(&hvirt->scratch_map, &pos->enable_map);

		/* Set the counters with the new union enable map */
		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum, &hvirt->scratch_map,
							       &ts_start_ns, &ts_end_ns,
							       &hvirt->scratch_buf);
		/* Accumulate into only existing clients' accumulation bufs */
		if (!errcode)
			list_for_each_entry (pos, &hvirt->clients, node)
				kbasep_hwcnt_virtualizer_client_accumulate(pos,
									   &hvirt->scratch_buf);
	}
	if (errcode)
		goto error;

	list_add(&hvcli->node, &hvirt->clients);
	hvcli->hvirt = hvirt;
	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
	hvcli->has_accum = false;
	hvcli->ts_start_ns = ts_end_ns;

	/* Store the most recent dump time for rate limiting */
	hvirt->ts_last_dump_ns = ts_end_ns;

	return 0;
error:
	hvirt->client_count -= 1;
	if (hvirt->client_count == 0)
		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
	return errcode;
}

/**
 * kbasep_hwcnt_virtualizer_client_remove - Remove a client from the
 *                                          virtualizer.
 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
 * @hvcli: Non-NULL pointer to the virtualizer client to remove.
 */
static void kbasep_hwcnt_virtualizer_client_remove(struct kbase_hwcnt_virtualizer *hvirt,
						   struct kbase_hwcnt_virtualizer_client *hvcli)
{
	int errcode = 0;
	u64 ts_start_ns;
	u64 ts_end_ns;

	WARN_ON(!hvirt);
	WARN_ON(!hvcli);
	lockdep_assert_held(&hvirt->lock);

	list_del(&hvcli->node);
	hvirt->client_count -= 1;

	if (hvirt->client_count == 0) {
		/* Last client removed, so terminate the accumulator */
		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
	} else {
		struct kbase_hwcnt_virtualizer_client *pos;
		/* Make the scratch enable map the union of all enable maps */
		kbase_hwcnt_enable_map_disable_all(&hvirt->scratch_map);
		list_for_each_entry (pos, &hvirt->clients, node)
			kbase_hwcnt_enable_map_union(&hvirt->scratch_map, &pos->enable_map);
		/* Set the counters with the new union enable map */
		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum, &hvirt->scratch_map,
							       &ts_start_ns, &ts_end_ns,
							       &hvirt->scratch_buf);
		/* Accumulate into remaining clients' accumulation bufs */
		if (!errcode) {
			list_for_each_entry (pos, &hvirt->clients, node)
				kbasep_hwcnt_virtualizer_client_accumulate(pos,
									   &hvirt->scratch_buf);

			/* Store the most recent dump time for rate limiting */
			hvirt->ts_last_dump_ns = ts_end_ns;
		}
	}
	WARN_ON(errcode);
}

/**
 * kbasep_hwcnt_virtualizer_client_set_counters - Perform a dump of the
 *                                                client's currently enabled
 *                                                counters, and enable a new
 *                                                set of counters that will be
 *                                                used for subsequent dumps.
 * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
 * @hvcli:       Non-NULL pointer to the virtualizer client.
 * @enable_map:  Non-NULL pointer to the new counter enable map for the client.
 *               Must have the same metadata as the virtualizer.
 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
 *               be written out to on success.
 * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
 *               be written out to on success.
 * @dump_buf:    Pointer to the buffer where the dump will be written out to on
 *               success. If non-NULL, must have the same metadata as the
 *               accumulator. If NULL, the dump will be discarded.
 *
 * Return: 0 on success or error code.
 */
static int kbasep_hwcnt_virtualizer_client_set_counters(
	struct kbase_hwcnt_virtualizer *hvirt, struct kbase_hwcnt_virtualizer_client *hvcli,
	const struct kbase_hwcnt_enable_map *enable_map, u64 *ts_start_ns, u64 *ts_end_ns,
	struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_virtualizer_client *pos;

	WARN_ON(!hvirt);
	WARN_ON(!hvcli);
	WARN_ON(!enable_map);
	WARN_ON(!ts_start_ns);
	WARN_ON(!ts_end_ns);
	WARN_ON(enable_map->metadata != hvirt->metadata);
	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
	lockdep_assert_held(&hvirt->lock);

	/* Make the scratch enable map the union of all enable maps */
	kbase_hwcnt_enable_map_copy(&hvirt->scratch_map, enable_map);
	list_for_each_entry (pos, &hvirt->clients, node)
		/* Ignore the enable map of the selected client */
		if (pos != hvcli)
			kbase_hwcnt_enable_map_union(&hvirt->scratch_map, &pos->enable_map);

	/* Set the counters with the new union enable map */
	errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum, &hvirt->scratch_map,
						       ts_start_ns, ts_end_ns,
						       &hvirt->scratch_buf);
	if (errcode)
		return errcode;

	/* Accumulate into all accumulation bufs except the selected client's */
	list_for_each_entry (pos, &hvirt->clients, node)
		if (pos != hvcli)
			kbasep_hwcnt_virtualizer_client_accumulate(pos, &hvirt->scratch_buf);

	/* Finally, write into the dump buf */
	if (dump_buf) {
		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;

		if (hvcli->has_accum) {
			kbase_hwcnt_dump_buffer_accumulate(&hvcli->accum_buf, src,
							   &hvcli->enable_map);
			src = &hvcli->accum_buf;
		}
		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
	}
	hvcli->has_accum = false;

	/* Update the selected client's enable map */
	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);

	/* Fix up the timestamps */
	*ts_start_ns = hvcli->ts_start_ns;
	hvcli->ts_start_ns = *ts_end_ns;

	/* Store the most recent dump time for rate limiting */
	hvirt->ts_last_dump_ns = *ts_end_ns;

	return errcode;
}

int kbase_hwcnt_virtualizer_client_set_counters(struct kbase_hwcnt_virtualizer_client *hvcli,
						const struct kbase_hwcnt_enable_map *enable_map,
						u64 *ts_start_ns, u64 *ts_end_ns,
						struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_virtualizer *hvirt;

	if (!hvcli || !enable_map || !ts_start_ns || !ts_end_ns)
		return -EINVAL;

	hvirt = hvcli->hvirt;

	if ((enable_map->metadata != hvirt->metadata) ||
	    (dump_buf && (dump_buf->metadata != hvirt->metadata)))
		return -EINVAL;

	mutex_lock(&hvirt->lock);

	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
		/*
		 * If there's only one client with no prior accumulation, we can
		 * completely skip the virtualize and just pass through the call
		 * to the accumulator, saving a fair few copies and
		 * accumulations.
		 */
		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum, enable_map,
							       ts_start_ns, ts_end_ns, dump_buf);

		if (!errcode) {
			/* Update the selected client's enable map */
			kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);

			/* Fix up the timestamps */
			*ts_start_ns = hvcli->ts_start_ns;
			hvcli->ts_start_ns = *ts_end_ns;

			/* Store the most recent dump time for rate limiting */
			hvirt->ts_last_dump_ns = *ts_end_ns;
		}
	} else {
		/* Otherwise, do the full virtualize */
		errcode = kbasep_hwcnt_virtualizer_client_set_counters(
			hvirt, hvcli, enable_map, ts_start_ns, ts_end_ns, dump_buf);
	}

	mutex_unlock(&hvirt->lock);

	return errcode;
}
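/*
 * Usage sketch (hedged and illustrative only; the local variables are
 * assumptions, not taken from any in-tree caller): a client that wants the
 * final values of its old counter set while switching to a new one can do both
 * atomically with a single call:
 *
 *	u64 start_ns, end_ns;
 *	int err;
 *
 *	// new_map and buf must have been allocated by the caller against the
 *	// metadata returned by kbase_hwcnt_virtualizer_metadata().
 *	err = kbase_hwcnt_virtualizer_client_set_counters(
 *		hvcli, &new_map, &start_ns, &end_ns, &buf);
 *
 * On success, buf holds everything accumulated for the old enable map between
 * start_ns and end_ns, and subsequent dumps by this client use new_map.
 */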
/**
 * kbasep_hwcnt_virtualizer_client_dump - Perform a dump of the client's
 *                                        currently enabled counters.
 * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
 * @hvcli:       Non-NULL pointer to the virtualizer client.
 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
 *               be written out to on success.
 * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
 *               be written out to on success.
 * @dump_buf:    Pointer to the buffer where the dump will be written out to on
 *               success. If non-NULL, must have the same metadata as the
 *               accumulator. If NULL, the dump will be discarded.
 *
 * Return: 0 on success or error code.
 */
static int kbasep_hwcnt_virtualizer_client_dump(struct kbase_hwcnt_virtualizer *hvirt,
						struct kbase_hwcnt_virtualizer_client *hvcli,
						u64 *ts_start_ns, u64 *ts_end_ns,
						struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_virtualizer_client *pos;

	WARN_ON(!hvirt);
	WARN_ON(!hvcli);
	WARN_ON(!ts_start_ns);
	WARN_ON(!ts_end_ns);
	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
	lockdep_assert_held(&hvirt->lock);

	/* Perform the dump */
	errcode = kbase_hwcnt_accumulator_dump(hvirt->accum, ts_start_ns, ts_end_ns,
					       &hvirt->scratch_buf);
	if (errcode)
		return errcode;

	/* Accumulate into all accumulation bufs except the selected client's */
	list_for_each_entry (pos, &hvirt->clients, node)
		if (pos != hvcli)
			kbasep_hwcnt_virtualizer_client_accumulate(pos, &hvirt->scratch_buf);

	/* Finally, write into the dump buf */
	if (dump_buf) {
		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;

		if (hvcli->has_accum) {
			kbase_hwcnt_dump_buffer_accumulate(&hvcli->accum_buf, src,
							   &hvcli->enable_map);
			src = &hvcli->accum_buf;
		}
		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
	}
	hvcli->has_accum = false;

	/* Fix up the timestamps */
	*ts_start_ns = hvcli->ts_start_ns;
	hvcli->ts_start_ns = *ts_end_ns;

	/* Store the most recent dump time for rate limiting */
	hvirt->ts_last_dump_ns = *ts_end_ns;

	return errcode;
}

/**
 * kbasep_hwcnt_virtualizer_client_dump_rate_limited - Perform a dump of the
 *                                                     client's currently
 *                                                     enabled counters if it
 *                                                     hasn't been rate limited,
 *                                                     otherwise return the
 *                                                     client's most recent
 *                                                     accumulation.
 * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
 * @hvcli:       Non-NULL pointer to the virtualizer client.
 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
 *               be written out to on success.
 * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
 *               be written out to on success.
 * @dump_buf:    Pointer to the buffer where the dump will be written out to on
 *               success. If non-NULL, must have the same metadata as the
 *               accumulator. If NULL, the dump will be discarded.
 *
 * Return: 0 on success or error code.
 */
static int kbasep_hwcnt_virtualizer_client_dump_rate_limited(
	struct kbase_hwcnt_virtualizer *hvirt, struct kbase_hwcnt_virtualizer_client *hvcli,
	u64 *ts_start_ns, u64 *ts_end_ns, struct kbase_hwcnt_dump_buffer *dump_buf)
{
	bool rate_limited = true;

	WARN_ON(!hvirt);
	WARN_ON(!hvcli);
	WARN_ON(!ts_start_ns);
	WARN_ON(!ts_end_ns);
	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
	lockdep_assert_held(&hvirt->lock);

	if (hvirt->dump_threshold_ns == 0) {
		/* Threshold == 0, so rate limiting disabled */
		rate_limited = false;
	} else if (hvirt->ts_last_dump_ns == hvcli->ts_start_ns) {
		/* Last dump was performed by this client, and dumps from an
		 * individual client are never rate limited
		 */
		rate_limited = false;
	} else {
		const u64 ts_ns = kbase_hwcnt_accumulator_timestamp_ns(hvirt->accum);
		const u64 time_since_last_dump_ns = ts_ns - hvirt->ts_last_dump_ns;

		/* Dump period equals or exceeds the threshold */
		if (time_since_last_dump_ns >= hvirt->dump_threshold_ns)
			rate_limited = false;
	}

	if (!rate_limited)
		return kbasep_hwcnt_virtualizer_client_dump(hvirt, hvcli, ts_start_ns, ts_end_ns,
							    dump_buf);

	/* If we've gotten this far, the client must have something accumulated
	 * otherwise it is a logic error
	 */
	WARN_ON(!hvcli->has_accum);

	if (dump_buf)
		kbase_hwcnt_dump_buffer_copy(dump_buf, &hvcli->accum_buf, &hvcli->enable_map);
	hvcli->has_accum = false;

	*ts_start_ns = hvcli->ts_start_ns;
	*ts_end_ns = hvirt->ts_last_dump_ns;
	hvcli->ts_start_ns = hvirt->ts_last_dump_ns;

	return 0;
}
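/*
 * Worked example with hypothetical numbers: suppose dump_threshold_ns is
 * 100 ms. Client A performs a dump at time T, so ts_last_dump_ns = T. If
 * client B then requests a dump at T + 40 ms, it is rate limited and simply
 * receives the contents of its accumulation buffer, with *ts_end_ns reported
 * as T. A request from B at T + 150 ms instead triggers a fresh accumulator
 * dump. A repeat request from A is never rate limited, because A's ts_start_ns
 * still matches ts_last_dump_ns.
 */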
int kbase_hwcnt_virtualizer_client_dump(struct kbase_hwcnt_virtualizer_client *hvcli,
					u64 *ts_start_ns, u64 *ts_end_ns,
					struct kbase_hwcnt_dump_buffer *dump_buf)
{
	int errcode;
	struct kbase_hwcnt_virtualizer *hvirt;

	if (!hvcli || !ts_start_ns || !ts_end_ns)
		return -EINVAL;

	hvirt = hvcli->hvirt;

	if (dump_buf && (dump_buf->metadata != hvirt->metadata))
		return -EINVAL;

	mutex_lock(&hvirt->lock);

	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
		/*
		 * If there's only one client with no prior accumulation, we can
		 * completely skip the virtualize and just pass through the call
		 * to the accumulator, saving a fair few copies and
		 * accumulations.
		 */
		errcode = kbase_hwcnt_accumulator_dump(hvirt->accum, ts_start_ns, ts_end_ns,
						       dump_buf);

		if (!errcode) {
			/* Fix up the timestamps */
			*ts_start_ns = hvcli->ts_start_ns;
			hvcli->ts_start_ns = *ts_end_ns;

			/* Store the most recent dump time for rate limiting */
			hvirt->ts_last_dump_ns = *ts_end_ns;
		}
	} else {
		/* Otherwise, do the full virtualize */
		errcode = kbasep_hwcnt_virtualizer_client_dump_rate_limited(
			hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
	}

	mutex_unlock(&hvirt->lock);

	return errcode;
}

int kbase_hwcnt_virtualizer_client_create(struct kbase_hwcnt_virtualizer *hvirt,
					  const struct kbase_hwcnt_enable_map *enable_map,
					  struct kbase_hwcnt_virtualizer_client **out_hvcli)
{
	int errcode;
	struct kbase_hwcnt_virtualizer_client *hvcli;

	if (!hvirt || !enable_map || !out_hvcli || (enable_map->metadata != hvirt->metadata))
		return -EINVAL;

	errcode = kbasep_hwcnt_virtualizer_client_alloc(hvirt->metadata, &hvcli);
	if (errcode)
		return errcode;

	mutex_lock(&hvirt->lock);

	errcode = kbasep_hwcnt_virtualizer_client_add(hvirt, hvcli, enable_map);

	mutex_unlock(&hvirt->lock);

	if (errcode) {
		kbasep_hwcnt_virtualizer_client_free(hvcli);
		return errcode;
	}

	*out_hvcli = hvcli;
	return 0;
}
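/*
 * Typical client lifecycle, as a hedged sketch (error handling is elided, and
 * kbase_hwcnt_enable_map_enable_all() is assumed to be the usual helper from
 * the hwcnt types API; everything else is exported by this file):
 *
 *	struct kbase_hwcnt_virtualizer_client *hvcli;
 *	struct kbase_hwcnt_enable_map map;
 *	u64 start_ns, end_ns;
 *
 *	kbase_hwcnt_enable_map_alloc(kbase_hwcnt_virtualizer_metadata(hvirt),
 *				     &map);
 *	kbase_hwcnt_enable_map_enable_all(&map);
 *
 *	kbase_hwcnt_virtualizer_client_create(hvirt, &map, &hvcli);
 *	kbase_hwcnt_virtualizer_client_dump(hvcli, &start_ns, &end_ns, NULL);
 *	kbase_hwcnt_virtualizer_client_destroy(hvcli);
 *	kbase_hwcnt_enable_map_free(&map);
 */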
void kbase_hwcnt_virtualizer_client_destroy(struct kbase_hwcnt_virtualizer_client *hvcli)
{
	if (!hvcli)
		return;

	mutex_lock(&hvcli->hvirt->lock);

	kbasep_hwcnt_virtualizer_client_remove(hvcli->hvirt, hvcli);

	mutex_unlock(&hvcli->hvirt->lock);

	kbasep_hwcnt_virtualizer_client_free(hvcli);
}

int kbase_hwcnt_virtualizer_init(struct kbase_hwcnt_context *hctx, u64 dump_threshold_ns,
				 struct kbase_hwcnt_virtualizer **out_hvirt)
{
	struct kbase_hwcnt_virtualizer *virt;
	const struct kbase_hwcnt_metadata *metadata;

	if (!hctx || !out_hvirt)
		return -EINVAL;

	metadata = kbase_hwcnt_context_metadata(hctx);
	if (!metadata)
		return -EINVAL;

	virt = kzalloc(sizeof(*virt), GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	virt->hctx = hctx;
	virt->dump_threshold_ns = dump_threshold_ns;
	virt->metadata = metadata;

	mutex_init(&virt->lock);
	INIT_LIST_HEAD(&virt->clients);

	*out_hvirt = virt;
	return 0;
}

void kbase_hwcnt_virtualizer_term(struct kbase_hwcnt_virtualizer *hvirt)
{
	if (!hvirt)
		return;

	/* Non-zero client count implies client leak */
	if (WARN_ON(hvirt->client_count != 0)) {
		struct kbase_hwcnt_virtualizer_client *pos, *n;

		list_for_each_entry_safe (pos, n, &hvirt->clients, node)
			kbase_hwcnt_virtualizer_client_destroy(pos);
	}

	WARN_ON(hvirt->client_count != 0);
	WARN_ON(hvirt->accum);

	kfree(hvirt);
}

bool kbase_hwcnt_virtualizer_queue_work(struct kbase_hwcnt_virtualizer *hvirt,
					struct work_struct *work)
{
	if (WARN_ON(!hvirt) || WARN_ON(!work))
		return false;

	return kbase_hwcnt_context_queue_work(hvirt->hctx, work);
}